github.com/consideritdone/landslidecore@v0.0.0-20230718131026-a8b21c5cf8a7/test/maverick/node/node.go (about)

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"net"
     9  	"net/http"
    10  	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
    11  	"strconv"
    12  	"strings"
    13  	"time"
    14  
    15  	"github.com/prometheus/client_golang/prometheus"
    16  	"github.com/prometheus/client_golang/prometheus/promhttp"
    17  	"github.com/rs/cors"
    18  
    19  	dbm "github.com/tendermint/tm-db"
    20  
    21  	abci "github.com/consideritdone/landslidecore/abci/types"
    22  	bcv0 "github.com/consideritdone/landslidecore/blockchain/v0"
    23  	bcv1 "github.com/consideritdone/landslidecore/blockchain/v1"
    24  	bcv2 "github.com/consideritdone/landslidecore/blockchain/v2"
    25  	cfg "github.com/consideritdone/landslidecore/config"
    26  	"github.com/consideritdone/landslidecore/consensus"
    27  	"github.com/consideritdone/landslidecore/crypto"
    28  	"github.com/consideritdone/landslidecore/evidence"
    29  	tmjson "github.com/consideritdone/landslidecore/libs/json"
    30  	"github.com/consideritdone/landslidecore/libs/log"
    31  	tmpubsub "github.com/consideritdone/landslidecore/libs/pubsub"
    32  	"github.com/consideritdone/landslidecore/libs/service"
    33  	"github.com/consideritdone/landslidecore/light"
    34  	mempl "github.com/consideritdone/landslidecore/mempool"
    35  	"github.com/consideritdone/landslidecore/p2p"
    36  	"github.com/consideritdone/landslidecore/p2p/pex"
    37  	"github.com/consideritdone/landslidecore/privval"
    38  	"github.com/consideritdone/landslidecore/proxy"
    39  	rpccore "github.com/consideritdone/landslidecore/rpc/core"
    40  	grpccore "github.com/consideritdone/landslidecore/rpc/grpc"
    41  	rpcserver "github.com/consideritdone/landslidecore/rpc/jsonrpc/server"
    42  	sm "github.com/consideritdone/landslidecore/state"
    43  	"github.com/consideritdone/landslidecore/state/indexer"
    44  	blockidxkv "github.com/consideritdone/landslidecore/state/indexer/block/kv"
    45  	blockidxnull "github.com/consideritdone/landslidecore/state/indexer/block/null"
    46  	"github.com/consideritdone/landslidecore/state/txindex"
    47  	"github.com/consideritdone/landslidecore/state/txindex/kv"
    48  	"github.com/consideritdone/landslidecore/state/txindex/null"
    49  	"github.com/consideritdone/landslidecore/statesync"
    50  	"github.com/consideritdone/landslidecore/store"
    51  	cs "github.com/consideritdone/landslidecore/test/maverick/consensus"
    52  	"github.com/consideritdone/landslidecore/types"
    53  	tmtime "github.com/consideritdone/landslidecore/types/time"
    54  	"github.com/consideritdone/landslidecore/version"
    55  )
    56  
    57  //------------------------------------------------------------------------------
    58  
    59  // ParseMisbehaviors is a util function that converts a comma separated string into
    60  // a map of misbehaviors to be executed by the maverick node
    61  func ParseMisbehaviors(str string) (map[int64]cs.Misbehavior, error) {
    62  	// check if string is empty in which case we run a normal node
    63  	var misbehaviors = make(map[int64]cs.Misbehavior)
    64  	if str == "" {
    65  		return misbehaviors, nil
    66  	}
    67  	strs := strings.Split(str, ",")
    68  	if len(strs)%2 != 0 {
    69  		return misbehaviors, errors.New("missing either height or misbehavior name in the misbehavior flag")
    70  	}
    71  OUTER_LOOP:
    72  	for i := 0; i < len(strs); i += 2 {
    73  		height, err := strconv.ParseInt(strs[i+1], 10, 64)
    74  		if err != nil {
    75  			return misbehaviors, fmt.Errorf("failed to parse misbehavior height: %w", err)
    76  		}
    77  		for key, misbehavior := range cs.MisbehaviorList {
    78  			if key == strs[i] {
    79  				misbehaviors[height] = misbehavior
    80  				continue OUTER_LOOP
    81  			}
    82  		}
    83  		return misbehaviors, fmt.Errorf("received unknown misbehavior: %s. Did you forget to add it?", strs[i])
    84  	}
    85  
    86  	return misbehaviors, nil
    87  }
    88  
// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string      // database name, e.g. "blockstore", "state", "evidence", "tx_index"
	Config *cfg.Config // node config supplying the backend type and data directory
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)
    97  
    98  // DefaultDBProvider returns a database using the DBBackend and DBDir
    99  // specified in the ctx.Config.
   100  func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
   101  	dbType := dbm.BackendType(ctx.Config.DBBackend)
   102  	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
   103  }
   104  
// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)
   109  
   110  // DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
   111  // the GenesisDoc from the config.GenesisFile() on the filesystem.
   112  func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
   113  	return func() (*types.GenesisDoc, error) {
   114  		return types.GenesisDocFromFile(config.GenesisFile())
   115  	}
   116  }
   117  
// Provider takes a config and a logger and returns a ready to go Node.
type Provider func(*cfg.Config, log.Logger) (*Node, error)
   120  
   121  // DefaultNewNode returns a Tendermint node with default settings for the
   122  // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
   123  // It implements NodeProvider.
   124  func DefaultNewNode(config *cfg.Config, logger log.Logger, misbehaviors map[int64]cs.Misbehavior) (*Node, error) {
   125  	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
   126  	if err != nil {
   127  		return nil, fmt.Errorf("failed to load or gen node key %s, err: %w", config.NodeKeyFile(), err)
   128  	}
   129  
   130  	return NewNode(config,
   131  		LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
   132  		nodeKey,
   133  		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
   134  		DefaultGenesisDocProviderFunc(config),
   135  		DefaultDBProvider,
   136  		DefaultMetricsProvider(config.Instrumentation),
   137  		logger,
   138  		misbehaviors,
   139  	)
   140  
   141  }
   142  
   143  // MetricsProvider returns a consensus, p2p and mempool Metrics.
   144  type MetricsProvider func(chainID string) (*consensus.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
   145  
   146  // DefaultMetricsProvider returns Metrics build using Prometheus client library
   147  // if Prometheus is enabled. Otherwise, it returns no-op Metrics.
   148  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   149  	return func(chainID string) (*consensus.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
   150  		if config.Prometheus {
   151  			return consensus.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   152  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   153  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   154  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   155  		}
   156  		return consensus.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
   157  	}
   158  }
   159  
// Option sets a parameter for the node.
type Option func(*Node)

// Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors.
// See: https://github.com/consideritdone/landslidecore/issues/4595
// Implemented by the v0/v1/v2 blockchain reactors; used after state sync completes.
type fastSyncReactor interface {
	SwitchToFastSync(sm.State) error
}
   168  
   169  // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
   170  // the node's Switch.
   171  //
   172  // WARNING: using any name from the below list of the existing reactors will
   173  // result in replacing it with the custom one.
   174  //
   175  //   - MEMPOOL
   176  //   - BLOCKCHAIN
   177  //   - CONSENSUS
   178  //   - EVIDENCE
   179  //   - PEX
   180  //   - STATESYNC
   181  func CustomReactors(reactors map[string]p2p.Reactor) Option {
   182  	return func(n *Node) {
   183  		for name, reactor := range reactors {
   184  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   185  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   186  					"name", name, "existing", existingReactor, "custom", reactor)
   187  				n.sw.RemoveReactor(name, existingReactor)
   188  			}
   189  			n.sw.AddReactor(name, reactor)
   190  		}
   191  	}
   192  }
   193  
   194  func CustomReactorsAsConstructors(reactors map[string]func(n *Node) p2p.Reactor) Option {
   195  	return func(n *Node) {
   196  		for name, customReactor := range reactors {
   197  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   198  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   199  					"name", name)
   200  				n.sw.RemoveReactor(name, existingReactor)
   201  			}
   202  			n.sw.AddReactor(name, customReactor(n))
   203  		}
   204  	}
   205  }
   206  
   207  // StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and
   208  // build a State object for bootstrapping the node.
   209  // WARNING: this interface is considered unstable and subject to change.
   210  func StateProvider(stateProvider statesync.StateProvider) Option {
   211  	return func(n *Node) {
   212  		n.stateSyncProvider = stateProvider
   213  	}
   214  }
   215  
//------------------------------------------------------------------------------

// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	service.BaseService

	// config
	config        *cfg.Config         // immutable node configuration
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport // multiplexed TCP transport for peer connections
	sw          *p2p.Switch             // p2p connections
	addrBook    pex.AddrBook            // known peers
	nodeInfo    p2p.NodeInfo            // our node's handshake info, shared with peers
	nodeKey     *p2p.NodeKey            // our node privkey
	isListening bool

	// services
	eventBus          *types.EventBus // pub/sub for services
	stateStore        sm.Store
	blockStore        *store.BlockStore // store the blockchain to disk
	bcReactor         p2p.Reactor       // for fast-syncing
	mempoolReactor    *mempl.Reactor    // for gossipping transactions
	mempool           mempl.Mempool
	stateSync         bool                    // whether the node should state sync on startup
	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
	stateSyncGenesis  sm.State                // provides the genesis state for state sync
	consensusState    *cs.State               // latest consensus state
	consensusReactor  *cs.Reactor             // for participating in the consensus
	pexReactor        *pex.Reactor            // for exchanging peer addresses
	evidencePool      *evidence.Pool          // tracking evidence
	proxyApp          proxy.AppConns          // connection to the application
	rpcListeners      []net.Listener          // rpc servers
	txIndexer         txindex.TxIndexer       // indexes txs from the event bus
	blockIndexer      indexer.BlockIndexer    // indexes block events from the event bus
	indexerService    *txindex.IndexerService // drives tx/block indexing off the event bus
	prometheusSrv     *http.Server            // serves /metrics when Prometheus is enabled
}
   258  
   259  func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
   260  	var blockStoreDB dbm.DB
   261  	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
   262  	if err != nil {
   263  		return
   264  	}
   265  	blockStore = store.NewBlockStore(blockStoreDB)
   266  
   267  	stateDB, err = dbProvider(&DBContext{"state", config})
   268  	if err != nil {
   269  		return
   270  	}
   271  
   272  	return
   273  }
   274  
   275  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
   276  	proxyApp := proxy.NewAppConns(clientCreator)
   277  	proxyApp.SetLogger(logger.With("module", "proxy"))
   278  	if err := proxyApp.Start(); err != nil {
   279  		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
   280  	}
   281  	return proxyApp, nil
   282  }
   283  
   284  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   285  	eventBus := types.NewEventBus()
   286  	eventBus.SetLogger(logger.With("module", "events"))
   287  	if err := eventBus.Start(); err != nil {
   288  		return nil, err
   289  	}
   290  	return eventBus, nil
   291  }
   292  
   293  func createAndStartIndexerService(
   294  	config *cfg.Config,
   295  	dbProvider DBProvider,
   296  	eventBus *types.EventBus,
   297  	logger log.Logger,
   298  ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
   299  
   300  	var (
   301  		txIndexer    txindex.TxIndexer
   302  		blockIndexer indexer.BlockIndexer
   303  	)
   304  
   305  	switch config.TxIndex.Indexer {
   306  	case "kv":
   307  		store, err := dbProvider(&DBContext{"tx_index", config})
   308  		if err != nil {
   309  			return nil, nil, nil, err
   310  		}
   311  
   312  		txIndexer = kv.NewTxIndex(store)
   313  		blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
   314  	default:
   315  		txIndexer = &null.TxIndex{}
   316  		blockIndexer = &blockidxnull.BlockerIndexer{}
   317  	}
   318  
   319  	indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus)
   320  	indexerService.SetLogger(logger.With("module", "txindex"))
   321  
   322  	if err := indexerService.Start(); err != nil {
   323  		return nil, nil, nil, err
   324  	}
   325  
   326  	return indexerService, txIndexer, blockIndexer, nil
   327  }
   328  
   329  func doHandshake(
   330  	stateStore sm.Store,
   331  	state sm.State,
   332  	blockStore sm.BlockStore,
   333  	genDoc *types.GenesisDoc,
   334  	eventBus types.BlockEventPublisher,
   335  	proxyApp proxy.AppConns,
   336  	consensusLogger log.Logger) error {
   337  
   338  	handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
   339  	handshaker.SetLogger(consensusLogger)
   340  	handshaker.SetEventBus(eventBus)
   341  	if err := handshaker.Handshake(proxyApp); err != nil {
   342  		return fmt.Errorf("error during handshake: %v", err)
   343  	}
   344  	return nil
   345  }
   346  
   347  func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
   348  	// Log the version info.
   349  	logger.Info("Version info",
   350  		"software", version.TMCoreSemVer,
   351  		"block", version.BlockProtocol,
   352  		"p2p", version.P2PProtocol,
   353  	)
   354  
   355  	// If the state and software differ in block version, at least log it.
   356  	if state.Version.Consensus.Block != version.BlockProtocol {
   357  		logger.Info("Software and state have different block protocols",
   358  			"software", version.BlockProtocol,
   359  			"state", state.Version.Consensus.Block,
   360  		)
   361  	}
   362  
   363  	addr := pubKey.Address()
   364  	// Log whether this node is a validator or an observer
   365  	if state.Validators.HasAddress(addr) {
   366  		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   367  	} else {
   368  		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   369  	}
   370  }
   371  
   372  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   373  	if state.Validators.Size() > 1 {
   374  		return false
   375  	}
   376  	addr, _ := state.Validators.GetByIndex(0)
   377  	return bytes.Equal(pubKey.Address(), addr)
   378  }
   379  
   380  func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
   381  	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
   382  
   383  	mempool := mempl.NewCListMempool(
   384  		config.Mempool,
   385  		proxyApp.Mempool(),
   386  		state.LastBlockHeight,
   387  		nil,
   388  		mempl.WithMetrics(memplMetrics),
   389  		mempl.WithPreCheck(sm.TxPreCheck(state)),
   390  		mempl.WithPostCheck(sm.TxPostCheck(state)),
   391  	)
   392  	mempoolLogger := logger.With("module", "mempool")
   393  	mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
   394  	mempoolReactor.SetLogger(mempoolLogger)
   395  
   396  	if config.Consensus.WaitForTxs() {
   397  		mempool.EnableTxsAvailable()
   398  	}
   399  	return mempoolReactor, mempool
   400  }
   401  
   402  func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   403  	stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) {
   404  
   405  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   406  	if err != nil {
   407  		return nil, nil, err
   408  	}
   409  	evidenceLogger := logger.With("module", "evidence")
   410  	evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore)
   411  	if err != nil {
   412  		return nil, nil, err
   413  	}
   414  	evidenceReactor := evidence.NewReactor(evidencePool)
   415  	evidenceReactor.SetLogger(evidenceLogger)
   416  	return evidenceReactor, evidencePool, nil
   417  }
   418  
   419  func createBlockchainReactor(config *cfg.Config,
   420  	state sm.State,
   421  	blockExec *sm.BlockExecutor,
   422  	blockStore *store.BlockStore,
   423  	fastSync bool,
   424  	logger log.Logger) (bcReactor p2p.Reactor, err error) {
   425  
   426  	switch config.FastSync.Version {
   427  	case "v0":
   428  		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   429  	case "v1":
   430  		bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   431  	case "v2":
   432  		bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   433  	default:
   434  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
   435  	}
   436  
   437  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   438  	return bcReactor, nil
   439  }
   440  
// createConsensusReactor builds the maverick consensus state machine — seeded
// with the per-height misbehavior map — and wraps it in the consensus reactor.
// waitSync indicates the reactor should not participate in consensus until a
// preceding state sync / fast sync has completed.
func createConsensusReactor(config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore sm.BlockStore,
	mempool *mempl.CListMempool,
	evidencePool *evidence.Pool,
	privValidator types.PrivValidator,
	csMetrics *consensus.Metrics,
	waitSync bool,
	eventBus *types.EventBus,
	consensusLogger log.Logger,
	misbehaviors map[int64]cs.Misbehavior) (*cs.Reactor, *cs.State) {

	consensusState := cs.NewState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mempool,
		evidencePool,
		misbehaviors,
		cs.StateMetrics(csMetrics),
	)
	consensusState.SetLogger(consensusLogger)
	// A nil privValidator means this node observes without signing.
	if privValidator != nil {
		consensusState.SetPrivValidator(privValidator)
	}
	consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
	consensusReactor.SetLogger(consensusLogger)
	// services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor
	consensusReactor.SetEventBus(eventBus)
	return consensusReactor, consensusState
}
   475  
// createTransport builds the multiplexed p2p transport, applying connection
// filters (duplicate-IP rejection and, when config.FilterPeers is set,
// ABCI-driven address filtering) and an inbound-connection cap. It returns the
// transport together with the peer filters to be installed on the Switch.
func createTransport(
	config *cfg.Config,
	nodeInfo p2p.NodeInfo,
	nodeKey *p2p.NodeKey,
	proxyApp proxy.AppConns,
) (
	*p2p.MultiplexTransport,
	[]p2p.PeerFilterFunc,
) {
	var (
		mConnConfig = p2p.MConnConfig(config.P2P)
		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
		connFilters = []p2p.ConnFilterFunc{}
		peerFilters = []p2p.PeerFilterFunc{}
	)

	if !config.P2P.AllowDuplicateIP {
		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
	}

	// Filter peers by addr or pubkey with an ABCI query.
	// If the query return code is OK, add peer.
	if config.FilterPeers {
		connFilters = append(
			connFilters,
			// ABCI query for address filtering.
			// Rejects the connection on transport error or non-OK app response.
			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}

				return nil
			},
		)

		peerFilters = append(
			peerFilters,
			// ABCI query for ID filtering.
			// Same contract as above, keyed by the peer's node ID.
			func(_ p2p.IPeerSet, p p2p.Peer) error {
				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}

				return nil
			},
		)
	}

	p2p.MultiplexTransportConnFilters(connFilters...)(transport)

	// Limit the number of incoming connections.
	// Unconditional peers are always admitted, so they extend the cap.
	max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
	p2p.MultiplexTransportMaxIncomingConnections(max)(transport)

	return transport, peerFilters
}
   544  
   545  func createSwitch(config *cfg.Config,
   546  	transport p2p.Transport,
   547  	p2pMetrics *p2p.Metrics,
   548  	peerFilters []p2p.PeerFilterFunc,
   549  	mempoolReactor *mempl.Reactor,
   550  	bcReactor p2p.Reactor,
   551  	stateSyncReactor *statesync.Reactor,
   552  	consensusReactor *cs.Reactor,
   553  	evidenceReactor *evidence.Reactor,
   554  	nodeInfo p2p.NodeInfo,
   555  	nodeKey *p2p.NodeKey,
   556  	p2pLogger log.Logger) *p2p.Switch {
   557  
   558  	sw := p2p.NewSwitch(
   559  		config.P2P,
   560  		transport,
   561  		p2p.WithMetrics(p2pMetrics),
   562  		p2p.SwitchPeerFilters(peerFilters...),
   563  	)
   564  	sw.SetLogger(p2pLogger)
   565  	sw.AddReactor("MEMPOOL", mempoolReactor)
   566  	sw.AddReactor("BLOCKCHAIN", bcReactor)
   567  	sw.AddReactor("CONSENSUS", consensusReactor)
   568  	sw.AddReactor("EVIDENCE", evidenceReactor)
   569  	sw.AddReactor("STATESYNC", stateSyncReactor)
   570  
   571  	sw.SetNodeInfo(nodeInfo)
   572  	sw.SetNodeKey(nodeKey)
   573  
   574  	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
   575  	return sw
   576  }
   577  
   578  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   579  	p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) {
   580  
   581  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   582  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   583  
   584  	// Add ourselves to addrbook to prevent dialing ourselves
   585  	if config.P2P.ExternalAddress != "" {
   586  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
   587  		if err != nil {
   588  			return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
   589  		}
   590  		addrBook.AddOurAddress(addr)
   591  	}
   592  	if config.P2P.ListenAddress != "" {
   593  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
   594  		if err != nil {
   595  			return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
   596  		}
   597  		addrBook.AddOurAddress(addr)
   598  	}
   599  
   600  	sw.SetAddrBook(addrBook)
   601  
   602  	return addrBook, nil
   603  }
   604  
   605  func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
   606  	sw *p2p.Switch, logger log.Logger) *pex.Reactor {
   607  
   608  	// TODO persistent peers ? so we can have their DNS addrs saved
   609  	pexReactor := pex.NewReactor(addrBook,
   610  		&pex.ReactorConfig{
   611  			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
   612  			SeedMode: config.P2P.SeedMode,
   613  			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
   614  			// blocks assuming 10s blocks ~ 28 hours.
   615  			// TODO (melekes): make it dynamic based on the actual block latencies
   616  			// from the live network.
   617  			// https://github.com/consideritdone/landslidecore/issues/3523
   618  			SeedDisconnectWaitPeriod:     28 * time.Hour,
   619  			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
   620  		})
   621  	pexReactor.SetLogger(logger.With("module", "pex"))
   622  	sw.AddReactor("PEX", pexReactor)
   623  	return pexReactor
   624  }
   625  
// startStateSync starts an asynchronous state sync process, then switches to
// fast sync mode (or directly to consensus when fastSync is false). When no
// stateProvider is supplied, a light-client-backed provider is constructed
// from the configured RPC servers and trust options. The returned error only
// covers provider setup; sync failures are logged from the goroutine.
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
	stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
	stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error {
	ssR.Logger.Info("Starting state sync")

	if stateProvider == nil {
		var err error
		// Bound light-client setup so a dead RPC server can't hang startup.
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		stateProvider, err = statesync.NewLightClientStateProvider(
			ctx,
			state.ChainID, state.Version, state.InitialHeight,
			config.RPCServers, light.TrustOptions{
				Period: config.TrustPeriod,
				Height: config.TrustHeight,
				Hash:   config.TrustHashBytes(),
			}, ssR.Logger.With("module", "light"))
		if err != nil {
			return fmt.Errorf("failed to set up light client state provider: %w", err)
		}
	}

	// Run the sync in the background; each failure below is terminal for the
	// sync attempt and is surfaced via the logger only.
	go func() {
		state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime)
		if err != nil {
			ssR.Logger.Error("State sync failed", "err", err)
			return
		}
		// Persist the synced state before handing off to fast sync/consensus.
		err = stateStore.Bootstrap(state)
		if err != nil {
			ssR.Logger.Error("Failed to bootstrap node with new state", "err", err)
			return
		}
		err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
		if err != nil {
			ssR.Logger.Error("Failed to store last seen commit", "err", err)
			return
		}

		if fastSync {
			// FIXME Very ugly to have these metrics bleed through here.
			conR.Metrics.StateSyncing.Set(0)
			conR.Metrics.FastSyncing.Set(1)
			err = bcR.SwitchToFastSync(state)
			if err != nil {
				ssR.Logger.Error("Failed to switch to fast sync", "err", err)
				return
			}
		} else {
			conR.SwitchToConsensus(state, true)
		}
	}()
	return nil
}
   681  
   682  // NewNode returns a new, ready to go, Tendermint Node.
   683  func NewNode(config *cfg.Config,
   684  	privValidator types.PrivValidator,
   685  	nodeKey *p2p.NodeKey,
   686  	clientCreator proxy.ClientCreator,
   687  	genesisDocProvider GenesisDocProvider,
   688  	dbProvider DBProvider,
   689  	metricsProvider MetricsProvider,
   690  	logger log.Logger,
   691  	misbehaviors map[int64]cs.Misbehavior,
   692  	options ...Option) (*Node, error) {
   693  
   694  	blockStore, stateDB, err := initDBs(config, dbProvider)
   695  	if err != nil {
   696  		return nil, err
   697  	}
   698  
   699  	stateStore := sm.NewStore(stateDB)
   700  
   701  	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
   702  	if err != nil {
   703  		return nil, err
   704  	}
   705  
   706  	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
   707  	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
   708  	if err != nil {
   709  		return nil, err
   710  	}
   711  
   712  	// EventBus and IndexerService must be started before the handshake because
   713  	// we might need to index the txs of the replayed block as this might not have happened
   714  	// when the node stopped last time (i.e. the node stopped after it saved the block
   715  	// but before it indexed the txs, or, endblocker panicked)
   716  	eventBus, err := createAndStartEventBus(logger)
   717  	if err != nil {
   718  		return nil, err
   719  	}
   720  
   721  	indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger)
   722  	if err != nil {
   723  		return nil, err
   724  	}
   725  
   726  	// If an address is provided, listen on the socket for a connection from an
   727  	// external signing process.
   728  	if config.PrivValidatorListenAddr != "" {
   729  		// FIXME: we should start services inside OnStart
   730  		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
   731  		if err != nil {
   732  			return nil, fmt.Errorf("error with private validator socket client: %w", err)
   733  		}
   734  	}
   735  
   736  	pubKey, err := privValidator.GetPubKey()
   737  	if err != nil {
   738  		return nil, fmt.Errorf("can't get pubkey: %w", err)
   739  	}
   740  
   741  	// Determine whether we should do state and/or fast sync.
   742  	// We don't fast-sync when the only validator is us.
   743  	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
   744  	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
   745  	if stateSync && state.LastBlockHeight > 0 {
   746  		logger.Info("Found local state with non-zero height, skipping state sync")
   747  		stateSync = false
   748  	}
   749  
   750  	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
   751  	// and replays any blocks as necessary to sync tendermint with the app.
   752  	consensusLogger := logger.With("module", "consensus")
   753  	if !stateSync {
   754  		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
   755  			return nil, err
   756  		}
   757  
   758  		// Reload the state. It will have the Version.Consensus.App set by the
   759  		// Handshake, and may have other modifications as well (ie. depending on
   760  		// what happened during block replay).
   761  		state, err = stateStore.Load()
   762  		if err != nil {
   763  			return nil, fmt.Errorf("cannot load state: %w", err)
   764  		}
   765  	}
   766  
   767  	logNodeStartupInfo(state, pubKey, logger, consensusLogger)
   768  
   769  	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
   770  
   771  	// Make MempoolReactor
   772  	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
   773  
   774  	// Make Evidence Reactor
   775  	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
   776  	if err != nil {
   777  		return nil, err
   778  	}
   779  
   780  	// make block executor for consensus and blockchain reactors to execute blocks
   781  	blockExec := sm.NewBlockExecutor(
   782  		stateStore,
   783  		logger.With("module", "state"),
   784  		proxyApp.Consensus(),
   785  		mempool,
   786  		evidencePool,
   787  		sm.BlockExecutorWithMetrics(smMetrics),
   788  	)
   789  
   790  	// Make BlockchainReactor. Don't start fast sync if we're doing a state sync first.
   791  	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger)
   792  	if err != nil {
   793  		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
   794  	}
   795  
   796  	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first.
   797  	// FIXME We need to update metrics here, since other reactors don't have access to them.
   798  	if stateSync {
   799  		csMetrics.StateSyncing.Set(1)
   800  	} else if fastSync {
   801  		csMetrics.FastSyncing.Set(1)
   802  	}
   803  
   804  	logger.Info("Setting up maverick consensus reactor", "Misbehaviors", misbehaviors)
   805  	consensusReactor, consensusState := createConsensusReactor(
   806  		config, state, blockExec, blockStore, mempool, evidencePool,
   807  		privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, misbehaviors)
   808  
   809  	// Set up state sync reactor, and schedule a sync if requested.
   810  	// FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy,
   811  	// we should clean this whole thing up. See:
   812  	// https://github.com/consideritdone/landslidecore/issues/4644
   813  	stateSyncReactor := statesync.NewReactor(
   814  		*config.StateSync,
   815  		proxyApp.Snapshot(),
   816  		proxyApp.Query(),
   817  		config.StateSync.TempDir,
   818  	)
   819  	stateSyncReactor.SetLogger(logger.With("module", "statesync"))
   820  
   821  	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
   822  	if err != nil {
   823  		return nil, err
   824  	}
   825  
   826  	// Setup Transport.
   827  	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
   828  
   829  	// Setup Switch.
   830  	p2pLogger := logger.With("module", "p2p")
   831  	sw := createSwitch(
   832  		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
   833  		stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
   834  	)
   835  
   836  	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
   837  	if err != nil {
   838  		return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
   839  	}
   840  
   841  	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   842  	if err != nil {
   843  		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
   844  	}
   845  
   846  	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
   847  	if err != nil {
   848  		return nil, fmt.Errorf("could not create addrbook: %w", err)
   849  	}
   850  
   851  	// Optionally, start the pex reactor
   852  	//
   853  	// TODO:
   854  	//
   855  	// We need to set Seeds and PersistentPeers on the switch,
   856  	// since it needs to be able to use these (and their DNS names)
   857  	// even if the PEX is off. We can include the DNS name in the NetAddress,
   858  	// but it would still be nice to have a clear list of the current "PersistentPeers"
   859  	// somewhere that we can return with net_info.
   860  	//
   861  	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
   862  	// Note we currently use the addrBook regardless at least for AddOurAddress
   863  	var pexReactor *pex.Reactor
   864  	if config.P2P.PexReactor {
   865  		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
   866  	}
   867  
   868  	if config.RPC.PprofListenAddress != "" {
   869  		go func() {
   870  			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
   871  			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
   872  		}()
   873  	}
   874  
   875  	node := &Node{
   876  		config:        config,
   877  		genesisDoc:    genDoc,
   878  		privValidator: privValidator,
   879  
   880  		transport: transport,
   881  		sw:        sw,
   882  		addrBook:  addrBook,
   883  		nodeInfo:  nodeInfo,
   884  		nodeKey:   nodeKey,
   885  
   886  		stateStore:       stateStore,
   887  		blockStore:       blockStore,
   888  		bcReactor:        bcReactor,
   889  		mempoolReactor:   mempoolReactor,
   890  		mempool:          mempool,
   891  		consensusState:   consensusState,
   892  		consensusReactor: consensusReactor,
   893  		stateSyncReactor: stateSyncReactor,
   894  		stateSync:        stateSync,
   895  		stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
   896  		pexReactor:       pexReactor,
   897  		evidencePool:     evidencePool,
   898  		proxyApp:         proxyApp,
   899  		txIndexer:        txIndexer,
   900  		indexerService:   indexerService,
   901  		blockIndexer:     blockIndexer,
   902  		eventBus:         eventBus,
   903  	}
   904  	node.BaseService = *service.NewBaseService(logger, "Node", node)
   905  
   906  	for _, option := range options {
   907  		option(node)
   908  	}
   909  
   910  	return node, nil
   911  }
   912  
// OnStart starts the Node. It implements service.Service.
//
// The sequence is order-sensitive: wait for genesis time, start RPC (so txs
// can be received before the first block), start the P2P transport and
// switch, dial persistent peers, and finally kick off state sync if it was
// enabled when the node was built.
func (n *Node) OnStart() error {
	// Block until the genesis time if it lies in the future.
	now := tmtime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		time.Sleep(genTime.Sub(now))
	}

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		// Keep the listeners so OnStop can close them.
		n.rpcListeners = listeners
	}

	// Optionally expose Prometheus metrics; the server handle is retained so
	// it can be shut down in OnStop.
	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport.
	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	// Open the mempool write-ahead log if it is enabled in the config.
	if n.config.Mempool.WalEnabled() {
		err = n.mempool.InitWAL()
		if err != nil {
			return fmt.Errorf("init mempool WAL: %w", err)
		}
	}

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
	}

	// Run state sync. The blockchain reactor must support being handed
	// control once state sync completes (fastSyncReactor).
	if n.stateSync {
		bcR, ok := n.bcReactor.(fastSyncReactor)
		if !ok {
			return fmt.Errorf("this blockchain reactor does not support switching from state sync")
		}
		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
			n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
		if err != nil {
			return fmt.Errorf("failed to start state sync: %w", err)
		}
	}

	return nil
}
   985  
// OnStop stops the Node. It implements service.Service.
//
// Teardown proceeds roughly in reverse start order: non-reactor services
// (event bus, indexer), then the P2P switch, mempool WAL, and transport, and
// finally the RPC listeners and external services (remote signer,
// Prometheus). Errors are logged rather than returned so shutdown proceeds
// as far as possible.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	if err := n.eventBus.Stop(); err != nil {
		n.Logger.Error("Error closing eventBus", "err", err)
	}
	if err := n.indexerService.Stop(); err != nil {
		n.Logger.Error("Error closing indexerService", "err", err)
	}

	// now stop the reactors
	if err := n.sw.Stop(); err != nil {
		n.Logger.Error("Error closing switch", "err", err)
	}

	// stop mempool WAL
	if n.config.Mempool.WalEnabled() {
		n.mempool.CloseWAL()
	}

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	// A socket-based private validator is itself a service and must be
	// stopped explicitly.
	if pvsc, ok := n.privValidator.(service.Service); ok {
		if err := pvsc.Stop(); err != nil {
			n.Logger.Error("Error closing private validator", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}
  1037  
// ConfigureRPC makes sure RPC has all the objects it needs to operate.
// It publishes the node's stores, reactors, event bus, and RPC config into
// the package-level rpccore environment used by the JSON-RPC handlers, and
// fails only if the private validator's public key cannot be obtained.
func (n *Node) ConfigureRPC() error {
	pubKey, err := n.privValidator.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	}
	rpccore.SetEnvironment(&rpccore.Environment{
		ProxyAppQuery:   n.proxyApp.Query(),
		ProxyAppMempool: n.proxyApp.Mempool(),

		StateStore:     n.stateStore,
		BlockStore:     n.blockStore,
		EvidencePool:   n.evidencePool,
		ConsensusState: n.consensusState,
		P2PPeers:       n.sw,
		P2PTransport:   n,

		PubKey:           pubKey,
		GenDoc:           n.genesisDoc,
		TxIndexer:        n.txIndexer,
		BlockIndexer:     n.blockIndexer,
		// NOTE(review): a zero-value consensus.Reactor is installed instead
		// of the node's maverick consensus reactor — presumably because the
		// maverick reactor does not satisfy the type RPC expects; confirm.
		ConsensusReactor: &consensus.Reactor{},
		EventBus:         n.eventBus,
		Mempool:          n.mempool,

		Logger: n.Logger.With("module", "rpc"),

		Config: *n.config.RPC,
	})
	return nil
}
  1069  
  1070  func (n *Node) startRPC() ([]net.Listener, error) {
  1071  	err := n.ConfigureRPC()
  1072  	if err != nil {
  1073  		return nil, err
  1074  	}
  1075  
  1076  	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
  1077  
  1078  	if n.config.RPC.Unsafe {
  1079  		rpccore.AddUnsafeRoutes()
  1080  	}
  1081  
  1082  	config := rpcserver.DefaultConfig()
  1083  	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
  1084  	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
  1085  	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
  1086  	// If necessary adjust global WriteTimeout to ensure it's greater than
  1087  	// TimeoutBroadcastTxCommit.
  1088  	// See https://github.com/consideritdone/landslidecore/issues/3435
  1089  	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
  1090  		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
  1091  	}
  1092  
  1093  	// we may expose the rpc over both a unix and tcp socket
  1094  	listeners := make([]net.Listener, len(listenAddrs))
  1095  	for i, listenAddr := range listenAddrs {
  1096  		mux := http.NewServeMux()
  1097  		rpcLogger := n.Logger.With("module", "rpc-server")
  1098  		wmLogger := rpcLogger.With("protocol", "websocket")
  1099  		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
  1100  			rpcserver.OnDisconnect(func(remoteAddr string) {
  1101  				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
  1102  				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
  1103  					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
  1104  				}
  1105  			}),
  1106  			rpcserver.ReadLimit(config.MaxBodyBytes),
  1107  		)
  1108  		wm.SetLogger(wmLogger)
  1109  		mux.HandleFunc("/websocket", wm.WebsocketHandler)
  1110  		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
  1111  		listener, err := rpcserver.Listen(
  1112  			listenAddr,
  1113  			config,
  1114  		)
  1115  		if err != nil {
  1116  			return nil, err
  1117  		}
  1118  
  1119  		var rootHandler http.Handler = mux
  1120  		if n.config.RPC.IsCorsEnabled() {
  1121  			corsMiddleware := cors.New(cors.Options{
  1122  				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
  1123  				AllowedMethods: n.config.RPC.CORSAllowedMethods,
  1124  				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
  1125  			})
  1126  			rootHandler = corsMiddleware.Handler(mux)
  1127  		}
  1128  		if n.config.RPC.IsTLSEnabled() {
  1129  			go func() {
  1130  				if err := rpcserver.ServeTLS(
  1131  					listener,
  1132  					rootHandler,
  1133  					n.config.RPC.CertFile(),
  1134  					n.config.RPC.KeyFile(),
  1135  					rpcLogger,
  1136  					config,
  1137  				); err != nil {
  1138  					n.Logger.Error("Error serving server with TLS", "err", err)
  1139  				}
  1140  			}()
  1141  		} else {
  1142  			go func() {
  1143  				if err := rpcserver.Serve(
  1144  					listener,
  1145  					rootHandler,
  1146  					rpcLogger,
  1147  					config,
  1148  				); err != nil {
  1149  					n.Logger.Error("Error serving server", "err", err)
  1150  				}
  1151  			}()
  1152  		}
  1153  
  1154  		listeners[i] = listener
  1155  	}
  1156  
  1157  	// we expose a simplified api over grpc for convenience to app devs
  1158  	grpcListenAddr := n.config.RPC.GRPCListenAddress
  1159  	if grpcListenAddr != "" {
  1160  		config := rpcserver.DefaultConfig()
  1161  		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
  1162  		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
  1163  		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
  1164  		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
  1165  		// If necessary adjust global WriteTimeout to ensure it's greater than
  1166  		// TimeoutBroadcastTxCommit.
  1167  		// See https://github.com/consideritdone/landslidecore/issues/3435
  1168  		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
  1169  			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
  1170  		}
  1171  		listener, err := rpcserver.Listen(grpcListenAddr, config)
  1172  		if err != nil {
  1173  			return nil, err
  1174  		}
  1175  		go func() {
  1176  			if err := grpccore.StartGRPCServer(listener); err != nil {
  1177  				n.Logger.Error("Error starting gRPC server", "err", err)
  1178  			}
  1179  		}()
  1180  		listeners = append(listeners, listener)
  1181  	}
  1182  
  1183  	return listeners, nil
  1184  }
  1185  
  1186  // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
  1187  // collectors on addr.
  1188  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1189  	srv := &http.Server{
  1190  		Addr: addr,
  1191  		Handler: promhttp.InstrumentMetricHandler(
  1192  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1193  				prometheus.DefaultGatherer,
  1194  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1195  			),
  1196  		),
  1197  	}
  1198  	go func() {
  1199  		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
  1200  			// Error starting or closing listener:
  1201  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1202  		}
  1203  	}()
  1204  	return srv
  1205  }
  1206  
// Switch returns the Node's p2p Switch, which manages peer connections.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}
  1211  
// BlockStore returns the Node's BlockStore, holding committed blocks.
func (n *Node) BlockStore() *store.BlockStore {
	return n.blockStore
}
  1216  
// ConsensusState returns the Node's (maverick) ConsensusState.
func (n *Node) ConsensusState() *cs.State {
	return n.consensusState
}
  1221  
// ConsensusReactor returns the Node's (maverick) ConsensusReactor.
func (n *Node) ConsensusReactor() *cs.Reactor {
	return n.consensusReactor
}
  1226  
// MempoolReactor returns the Node's mempool reactor.
func (n *Node) MempoolReactor() *mempl.Reactor {
	return n.mempoolReactor
}
  1231  
// Mempool returns the Node's mempool (as the mempl.Mempool interface).
func (n *Node) Mempool() mempl.Mempool {
	return n.mempool
}
  1236  
// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled
// in the P2P config.
func (n *Node) PEXReactor() *pex.Reactor {
	return n.pexReactor
}
  1241  
// EvidencePool returns the Node's EvidencePool of misbehavior evidence.
func (n *Node) EvidencePool() *evidence.Pool {
	return n.evidencePool
}
  1246  
// EventBus returns the Node's EventBus, used for pub/sub of internal events.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}
  1251  
// PrivValidator returns the Node's PrivValidator (local file-backed or a
// remote socket client).
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}
  1257  
// GenesisDoc returns the Node's GenesisDoc (chain ID, genesis time, initial
// validators).
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}
  1262  
// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}
  1267  
// Config returns the Node's config as supplied at construction time.
func (n *Node) Config() *cfg.Config {
	return n.config
}
  1272  
  1273  //------------------------------------------------------------------------------
  1274  
// Listeners returns a human-readable description of the P2P external address
// for reporting purposes; it is not a list of live net.Listeners.
func (n *Node) Listeners() []string {
	return []string{
		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
	}
}
  1280  
// IsListening reports whether the P2P transport is listening (set true after
// the transport starts in OnStart, false again in OnStop).
func (n *Node) IsListening() bool {
	return n.isListening
}
  1284  
// NodeInfo returns the node info built by makeNodeInfo at construction time
// and advertised to peers during the handshake.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}
  1289  
// makeNodeInfo assembles the p2p.DefaultNodeInfo advertised to peers during
// the handshake: protocol versions, chain ID, the set of reactor channels
// this node serves, moniker, and listen/RPC addresses. It returns an error
// for an unknown fastsync version or if the assembled info fails validation.
func makeNodeInfo(
	config *cfg.Config,
	nodeKey *p2p.NodeKey,
	txIndexer txindex.TxIndexer,
	genDoc *types.GenesisDoc,
	state sm.State,
) (p2p.NodeInfo, error) {
	// Advertise whether tx indexing is enabled ("off" iff the null indexer
	// is in use).
	txIndexerStatus := "on"
	if _, ok := txIndexer.(*null.TxIndex); ok {
		txIndexerStatus = "off"
	}

	// The blockchain (fastsync) channel ID depends on the configured reactor
	// version.
	var bcChannel byte
	switch config.FastSync.Version {
	case "v0":
		bcChannel = bcv0.BlockchainChannel
	case "v1":
		bcChannel = bcv1.BlockchainChannel
	case "v2":
		bcChannel = bcv2.BlockchainChannel
	default:
		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
	}

	nodeInfo := p2p.DefaultNodeInfo{
		ProtocolVersion: p2p.NewProtocolVersion(
			version.P2PProtocol, // global
			state.Version.Consensus.Block,
			state.Version.Consensus.App,
		),
		DefaultNodeID: nodeKey.ID(),
		Network:       genDoc.ChainID,
		Version:       version.TMCoreSemVer,
		Channels: []byte{
			bcChannel,
			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
			mempl.MempoolChannel,
			evidence.EvidenceChannel,
			statesync.SnapshotChannel, statesync.ChunkChannel,
		},
		Moniker: config.Moniker,
		Other: p2p.DefaultNodeInfoOther{
			TxIndex:    txIndexerStatus,
			RPCAddress: config.RPC.ListenAddress,
		},
	}

	// The PEX channel is only advertised when the PEX reactor is enabled.
	if config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	// Prefer the configured external address; fall back to the listen address.
	lAddr := config.P2P.ExternalAddress

	if lAddr == "" {
		lAddr = config.P2P.ListenAddress
	}

	nodeInfo.ListenAddr = lAddr

	err := nodeInfo.Validate()
	return nodeInfo, err
}
  1352  
  1353  //------------------------------------------------------------------------------
  1354  
var (
	// genesisDocKey is the database key under which the genesis document is
	// persisted (see loadGenesisDoc / saveGenesisDoc).
	genesisDocKey = []byte("genesisDoc")
)
  1358  
// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
// database, or creates one using the given genesisDocProvider and persists the
// result to the database. On success this also returns the genesis doc loaded
// through the given provider.
func LoadStateFromDBOrGenesisDocProvider(
	stateDB dbm.DB,
	genesisDocProvider GenesisDocProvider,
) (sm.State, *types.GenesisDoc, error) {
	// Get genesis doc
	genDoc, err := loadGenesisDoc(stateDB)
	if err != nil {
		// Not in the DB yet: fall back to the provider (typically the
		// genesis file on disk).
		genDoc, err = genesisDocProvider()
		if err != nil {
			return sm.State{}, nil, err
		}
		// save genesis doc to prevent a certain class of user errors (e.g. when it
		// was changed, accidentally or not). Also good for audit trail.
		saveGenesisDoc(stateDB, genDoc)
	}
	stateStore := sm.NewStore(stateDB)
	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
	if err != nil {
		return sm.State{}, nil, err
	}
	return state, genDoc, nil
}
  1385  
// loadGenesisDoc reads the genesis document stored under genesisDocKey.
// It returns an error if no document is stored; it panics on a DB read
// failure or if the stored bytes fail to unmarshal, since both indicate
// corruption rather than a recoverable condition.
func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
	b, err := db.Get(genesisDocKey)
	if err != nil {
		panic(err)
	}
	if len(b) == 0 {
		return nil, errors.New("genesis doc not found")
	}
	// Unmarshal through a pointer-to-pointer so tmjson allocates the doc.
	var genDoc *types.GenesisDoc
	err = tmjson.Unmarshal(b, &genDoc)
	if err != nil {
		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
	}
	return genDoc, nil
}
  1402  
// saveGenesisDoc persists the genesis document under genesisDocKey using a
// synchronous write. It panics if marshaling or the DB write fails.
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
	b, err := tmjson.Marshal(genDoc)
	if err != nil {
		panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
	}
	if err := db.SetSync(genesisDocKey, b); err != nil {
		panic(fmt.Sprintf("Failed to save genesis doc: %v", err))
	}
}
  1413  
// createAndStartPrivValidatorSocketClient listens on listenAddr for a
// connection from an external signing process and returns a signer client
// wrapped in a retry layer. It verifies connectivity by requesting the
// public key once before returning.
func createAndStartPrivValidatorSocketClient(
	listenAddr,
	chainID string,
	logger log.Logger,
) (types.PrivValidator, error) {
	pve, err := privval.NewSignerListener(listenAddr, logger)
	if err != nil {
		return nil, fmt.Errorf("failed to start private validator: %w", err)
	}

	pvsc, err := privval.NewSignerClient(pve, chainID)
	if err != nil {
		return nil, fmt.Errorf("failed to start private validator: %w", err)
	}

	// Fetch the pubkey once up front to confirm the remote signer is
	// reachable and functional.
	_, err = pvsc.GetPubKey()
	if err != nil {
		return nil, fmt.Errorf("can't get pubkey: %w", err)
	}

	const (
		retries = 50 // 50 * 100ms = 5s total
		timeout = 100 * time.Millisecond
	)
	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)

	return pvscWithRetries, nil
}
  1443  
  1444  // splitAndTrimEmpty slices s into all subslices separated by sep and returns a
  1445  // slice of the string s with all leading and trailing Unicode code points
  1446  // contained in cutset removed. If sep is empty, SplitAndTrim splits after each
  1447  // UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
  1448  // -1.  also filter out empty strings, only return non-empty strings.
  1449  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1450  	if s == "" {
  1451  		return []string{}
  1452  	}
  1453  
  1454  	spl := strings.Split(s, sep)
  1455  	nonEmptyStrings := make([]string, 0, len(spl))
  1456  	for i := 0; i < len(spl); i++ {
  1457  		element := strings.Trim(spl[i], cutset)
  1458  		if element != "" {
  1459  			nonEmptyStrings = append(nonEmptyStrings, element)
  1460  		}
  1461  	}
  1462  	return nonEmptyStrings
  1463  }