github.com/badrootd/celestia-core@v0.0.0-20240305091328-aa4207a4b25d/test/maverick/node/node.go (about)

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"net"
     9  	"net/http"
    10  	_ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port
    11  	"strconv"
    12  	"strings"
    13  	"time"
    14  
    15  	"github.com/prometheus/client_golang/prometheus"
    16  	"github.com/prometheus/client_golang/prometheus/promhttp"
    17  	"github.com/rs/cors"
    18  
    19  	dbm "github.com/cometbft/cometbft-db"
    20  
    21  	abci "github.com/badrootd/celestia-core/abci/types"
    22  	bcv0 "github.com/badrootd/celestia-core/blockchain/v0"
    23  	bcv1 "github.com/badrootd/celestia-core/blockchain/v1"
    24  	bcv2 "github.com/badrootd/celestia-core/blockchain/v2"
    25  	cfg "github.com/badrootd/celestia-core/config"
    26  	"github.com/badrootd/celestia-core/consensus"
    27  	"github.com/badrootd/celestia-core/crypto"
    28  	"github.com/badrootd/celestia-core/evidence"
    29  	cmtjson "github.com/badrootd/celestia-core/libs/json"
    30  	"github.com/badrootd/celestia-core/libs/log"
    31  	cmtpubsub "github.com/badrootd/celestia-core/libs/pubsub"
    32  	"github.com/badrootd/celestia-core/libs/service"
    33  	"github.com/badrootd/celestia-core/light"
    34  	mempl "github.com/badrootd/celestia-core/mempool"
    35  	mempoolv2 "github.com/badrootd/celestia-core/mempool/cat"
    36  	mempoolv0 "github.com/badrootd/celestia-core/mempool/v0"
    37  	mempoolv1 "github.com/badrootd/celestia-core/mempool/v1"
    38  	"github.com/badrootd/celestia-core/p2p"
    39  	"github.com/badrootd/celestia-core/p2p/pex"
    40  	"github.com/badrootd/celestia-core/pkg/trace"
    41  	"github.com/badrootd/celestia-core/privval"
    42  	"github.com/badrootd/celestia-core/proxy"
    43  	rpccore "github.com/badrootd/celestia-core/rpc/core"
    44  	grpccore "github.com/badrootd/celestia-core/rpc/grpc"
    45  	rpcserver "github.com/badrootd/celestia-core/rpc/jsonrpc/server"
    46  	sm "github.com/badrootd/celestia-core/state"
    47  	"github.com/badrootd/celestia-core/state/indexer"
    48  	blockidxkv "github.com/badrootd/celestia-core/state/indexer/block/kv"
    49  	blockidxnull "github.com/badrootd/celestia-core/state/indexer/block/null"
    50  	"github.com/badrootd/celestia-core/state/txindex"
    51  	"github.com/badrootd/celestia-core/state/txindex/kv"
    52  	"github.com/badrootd/celestia-core/state/txindex/null"
    53  	"github.com/badrootd/celestia-core/statesync"
    54  	"github.com/badrootd/celestia-core/store"
    55  	cs "github.com/badrootd/celestia-core/test/maverick/consensus"
    56  	"github.com/badrootd/celestia-core/types"
    57  	cmttime "github.com/badrootd/celestia-core/types/time"
    58  	"github.com/badrootd/celestia-core/version"
    59  )
    60  
    61  //------------------------------------------------------------------------------
    62  
    63  // ParseMisbehaviors is a util function that converts a comma separated string into
    64  // a map of misbehaviors to be executed by the maverick node
    65  func ParseMisbehaviors(str string) (map[int64]cs.Misbehavior, error) {
    66  	// check if string is empty in which case we run a normal node
    67  	misbehaviors := make(map[int64]cs.Misbehavior)
    68  	if str == "" {
    69  		return misbehaviors, nil
    70  	}
    71  	strs := strings.Split(str, ",")
    72  	if len(strs)%2 != 0 {
    73  		return misbehaviors, errors.New("missing either height or misbehavior name in the misbehavior flag")
    74  	}
    75  OUTER_LOOP:
    76  	for i := 0; i < len(strs); i += 2 {
    77  		height, err := strconv.ParseInt(strs[i+1], 10, 64)
    78  		if err != nil {
    79  			return misbehaviors, fmt.Errorf("failed to parse misbehavior height: %w", err)
    80  		}
    81  		for key, misbehavior := range cs.MisbehaviorList {
    82  			if key == strs[i] {
    83  				misbehaviors[height] = misbehavior
    84  				continue OUTER_LOOP
    85  			}
    86  		}
    87  		return misbehaviors, fmt.Errorf("received unknown misbehavior: %s. Did you forget to add it?", strs[i])
    88  	}
    89  
    90  	return misbehaviors, nil
    91  }
    92  
// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string      // database name, e.g. "blockstore", "state", "tx_index", "evidence"
	Config *cfg.Config // node config, used to resolve the backend type and data directory
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

// readHeaderTimeout bounds how long HTTP servers started by this node wait
// for request headers (http.Server.ReadHeaderTimeout).
const readHeaderTimeout = 10 * time.Second
   103  
   104  // DefaultDBProvider returns a database using the DBBackend and DBDir
   105  // specified in the ctx.Config.
   106  func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
   107  	dbType := dbm.BackendType(ctx.Config.DBBackend)
   108  	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
   109  }
   110  
// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)

// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
	return func() (*types.GenesisDoc, error) {
		// The path is resolved at call time so the provider always reflects
		// the current config.
		return types.GenesisDocFromFile(config.GenesisFile())
	}
}

// Provider takes a config and a logger and returns a ready to go Node.
type Provider func(*cfg.Config, log.Logger) (*Node, error)
   126  
   127  // DefaultNewNode returns a CometBFT node with default settings for the
   128  // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
   129  // It implements NodeProvider.
   130  func DefaultNewNode(config *cfg.Config, logger log.Logger, misbehaviors map[int64]cs.Misbehavior) (*Node, error) {
   131  	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
   132  	if err != nil {
   133  		return nil, fmt.Errorf("failed to load or gen node key %s, err: %w", config.NodeKeyFile(), err)
   134  	}
   135  
   136  	return NewNode(config,
   137  		LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
   138  		nodeKey,
   139  		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
   140  		DefaultGenesisDocProviderFunc(config),
   141  		DefaultDBProvider,
   142  		DefaultMetricsProvider(config.Instrumentation),
   143  		logger,
   144  		misbehaviors,
   145  	)
   146  }
   147  
// MetricsProvider returns consensus, p2p, mempool, and state Metrics.
// chainID is attached as a "chain_id" label to all Prometheus metrics.
type MetricsProvider func(chainID string) (*consensus.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
   150  
   151  // DefaultMetricsProvider returns Metrics build using Prometheus client library
   152  // if Prometheus is enabled. Otherwise, it returns no-op Metrics.
   153  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   154  	return func(chainID string) (*consensus.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
   155  		if config.Prometheus {
   156  			return consensus.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   157  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   158  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   159  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   160  		}
   161  		return consensus.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
   162  	}
   163  }
   164  
// Option sets a parameter for the node.
type Option func(*Node)

// Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors.
// See: https://github.com/cometbft/cometbft/issues/4595
type fastSyncReactor interface {
	// SwitchToFastSync transitions the reactor into fast-sync mode, starting
	// from the given (state-synced) state.
	SwitchToFastSync(sm.State) error
}
   173  
   174  // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
   175  // the node's Switch.
   176  //
   177  // WARNING: using any name from the below list of the existing reactors will
   178  // result in replacing it with the custom one.
   179  //
   180  //   - MEMPOOL
   181  //   - BLOCKCHAIN
   182  //   - CONSENSUS
   183  //   - EVIDENCE
   184  //   - PEX
   185  //   - STATESYNC
   186  func CustomReactors(reactors map[string]p2p.Reactor) Option {
   187  	return func(n *Node) {
   188  		for name, reactor := range reactors {
   189  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   190  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   191  					"name", name, "existing", existingReactor, "custom", reactor)
   192  				n.sw.RemoveReactor(name, existingReactor)
   193  			}
   194  			n.sw.AddReactor(name, reactor)
   195  		}
   196  	}
   197  }
   198  
   199  func CustomReactorsAsConstructors(reactors map[string]func(n *Node) p2p.Reactor) Option {
   200  	return func(n *Node) {
   201  		for name, customReactor := range reactors {
   202  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   203  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   204  					"name", name)
   205  				n.sw.RemoveReactor(name, existingReactor)
   206  			}
   207  			n.sw.AddReactor(name, customReactor(n))
   208  		}
   209  	}
   210  }
   211  
// StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and
// build a State object for bootstrapping the node.
// WARNING: this interface is considered unstable and subject to change.
func StateProvider(stateProvider statesync.StateProvider) Option {
	return func(n *Node) {
		// startStateSync only builds its default light-client provider when
		// this field is left nil.
		n.stateSyncProvider = stateProvider
	}
}
   220  
   221  //------------------------------------------------------------------------------
   222  
// Node is the highest level interface to a full CometBFT node.
// It includes all configuration information and running services.
type Node struct {
	service.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport // multiplex transport for peer connections
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo // our node's p2p metadata
	nodeKey     *p2p.NodeKey // our node privkey
	isListening bool         // true while the p2p transport is listening

	// services
	eventBus          *types.EventBus // pub/sub for services
	stateStore        sm.Store // persists and loads consensus state
	blockStore        *store.BlockStore // store the blockchain to disk
	bcReactor         p2p.Reactor       // for fast-syncing
	mempoolReactor    p2p.Reactor       // for gossipping transactions
	mempool           mempl.Mempool // pool of uncommitted transactions
	stateSync         bool                    // whether the node should state sync on startup
	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
	stateSyncGenesis  sm.State                // provides the genesis state for state sync
	consensusState    *cs.State               // latest consensus state
	consensusReactor  *cs.Reactor             // for participating in the consensus
	pexReactor        *pex.Reactor            // for exchanging peer addresses
	evidencePool      *evidence.Pool          // tracking evidence
	proxyApp          proxy.AppConns          // connection to the application
	rpcListeners      []net.Listener          // rpc servers
	txIndexer         txindex.TxIndexer // transaction indexer (kv or no-op)
	blockIndexer      indexer.BlockIndexer // block event indexer (kv or no-op)
	indexerService    *txindex.IndexerService // feeds event-bus events to the indexers
	prometheusSrv     *http.Server // HTTP server exposing Prometheus metrics, when enabled
}
   263  
   264  func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
   265  	var blockStoreDB dbm.DB
   266  	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
   267  	if err != nil {
   268  		return
   269  	}
   270  	blockStore = store.NewBlockStore(blockStoreDB)
   271  
   272  	stateDB, err = dbProvider(&DBContext{"state", config})
   273  	if err != nil {
   274  		return
   275  	}
   276  
   277  	return
   278  }
   279  
   280  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
   281  	proxyApp := proxy.NewAppConns(clientCreator)
   282  	proxyApp.SetLogger(logger.With("module", "proxy"))
   283  	if err := proxyApp.Start(); err != nil {
   284  		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
   285  	}
   286  	return proxyApp, nil
   287  }
   288  
   289  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   290  	eventBus := types.NewEventBus()
   291  	eventBus.SetLogger(logger.With("module", "events"))
   292  	if err := eventBus.Start(); err != nil {
   293  		return nil, err
   294  	}
   295  	return eventBus, nil
   296  }
   297  
   298  func createAndStartIndexerService(
   299  	config *cfg.Config,
   300  	dbProvider DBProvider,
   301  	eventBus *types.EventBus,
   302  	logger log.Logger,
   303  ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
   304  	var (
   305  		txIndexer    txindex.TxIndexer
   306  		blockIndexer indexer.BlockIndexer
   307  	)
   308  
   309  	switch config.TxIndex.Indexer {
   310  	case "kv":
   311  		store, err := dbProvider(&DBContext{"tx_index", config})
   312  		if err != nil {
   313  			return nil, nil, nil, err
   314  		}
   315  
   316  		txIndexer = kv.NewTxIndex(store)
   317  		blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
   318  	default:
   319  		txIndexer = &null.TxIndex{}
   320  		blockIndexer = &blockidxnull.BlockerIndexer{}
   321  	}
   322  
   323  	indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false)
   324  	indexerService.SetLogger(logger.With("module", "txindex"))
   325  
   326  	if err := indexerService.Start(); err != nil {
   327  		return nil, nil, nil, err
   328  	}
   329  
   330  	return indexerService, txIndexer, blockIndexer, nil
   331  }
   332  
   333  func doHandshake(
   334  	stateStore sm.Store,
   335  	state sm.State,
   336  	blockStore sm.BlockStore,
   337  	genDoc *types.GenesisDoc,
   338  	eventBus types.BlockEventPublisher,
   339  	proxyApp proxy.AppConns,
   340  	consensusLogger log.Logger,
   341  ) error {
   342  	handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
   343  	handshaker.SetLogger(consensusLogger)
   344  	handshaker.SetEventBus(eventBus)
   345  	if err := handshaker.Handshake(proxyApp); err != nil {
   346  		return fmt.Errorf("error during handshake: %v", err)
   347  	}
   348  	return nil
   349  }
   350  
   351  func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
   352  	// Log the version info.
   353  	logger.Info("Version info",
   354  		"software", version.TMCoreSemVer,
   355  		"block", version.BlockProtocol,
   356  		"p2p", version.P2PProtocol,
   357  	)
   358  
   359  	// If the state and software differ in block version, at least log it.
   360  	if state.Version.Consensus.Block != version.BlockProtocol {
   361  		logger.Info("Software and state have different block protocols",
   362  			"software", version.BlockProtocol,
   363  			"state", state.Version.Consensus.Block,
   364  		)
   365  	}
   366  
   367  	addr := pubKey.Address()
   368  	// Log whether this node is a validator or an observer
   369  	if state.Validators.HasAddress(addr) {
   370  		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   371  	} else {
   372  		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   373  	}
   374  }
   375  
   376  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   377  	if state.Validators.Size() > 1 {
   378  		return false
   379  	}
   380  	addr, _ := state.Validators.GetByIndex(0)
   381  	return bytes.Equal(pubKey.Address(), addr)
   382  }
   383  
   384  func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
   385  	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger,
   386  ) (p2p.Reactor, mempl.Mempool) {
   387  	switch config.Mempool.Version {
   388  	case cfg.MempoolV2:
   389  		mp := mempoolv2.NewTxPool(
   390  			logger,
   391  			config.Mempool,
   392  			proxyApp.Mempool(),
   393  			state.LastBlockHeight,
   394  			mempoolv2.WithMetrics(memplMetrics),
   395  			mempoolv2.WithPreCheck(sm.TxPreCheck(state)),
   396  			mempoolv2.WithPostCheck(sm.TxPostCheck(state)),
   397  		)
   398  
   399  		reactor, err := mempoolv2.NewReactor(
   400  			mp,
   401  			&mempoolv2.ReactorOptions{
   402  				ListenOnly:     !config.Mempool.Broadcast,
   403  				MaxTxSize:      config.Mempool.MaxTxBytes,
   404  				MaxGossipDelay: config.Mempool.MaxGossipDelay,
   405  			},
   406  		)
   407  		if err != nil {
   408  			// TODO: find a more polite way of handling this error
   409  			panic(err)
   410  		}
   411  		if config.Consensus.WaitForTxs() {
   412  			mp.EnableTxsAvailable()
   413  		}
   414  
   415  		return reactor, mp
   416  	case cfg.MempoolV1:
   417  		mp := mempoolv1.NewTxMempool(
   418  			logger,
   419  			config.Mempool,
   420  			proxyApp.Mempool(),
   421  			state.LastBlockHeight,
   422  			mempoolv1.WithMetrics(memplMetrics),
   423  			mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
   424  			mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
   425  		)
   426  
   427  		reactor := mempoolv1.NewReactor(
   428  			config.Mempool,
   429  			mp,
   430  			&trace.Client{},
   431  		)
   432  		if config.Consensus.WaitForTxs() {
   433  			mp.EnableTxsAvailable()
   434  		}
   435  
   436  		return reactor, mp
   437  
   438  	case cfg.MempoolV0:
   439  		mp := mempoolv0.NewCListMempool(
   440  			config.Mempool,
   441  			proxyApp.Mempool(),
   442  			state.LastBlockHeight,
   443  			mempoolv0.WithMetrics(memplMetrics),
   444  			mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
   445  			mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
   446  		)
   447  
   448  		mp.SetLogger(logger)
   449  		mp.SetLogger(logger)
   450  
   451  		reactor := mempoolv0.NewReactor(
   452  			config.Mempool,
   453  			mp,
   454  		)
   455  		if config.Consensus.WaitForTxs() {
   456  			mp.EnableTxsAvailable()
   457  		}
   458  
   459  		return reactor, mp
   460  
   461  	default:
   462  		return nil, nil
   463  	}
   464  }
   465  
   466  func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   467  	stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger,
   468  ) (*evidence.Reactor, *evidence.Pool, error) {
   469  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   470  	if err != nil {
   471  		return nil, nil, err
   472  	}
   473  	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
   474  		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
   475  	})
   476  	evidenceLogger := logger.With("module", "evidence")
   477  	evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
   478  	if err != nil {
   479  		return nil, nil, err
   480  	}
   481  	evidenceReactor := evidence.NewReactor(evidencePool)
   482  	evidenceReactor.SetLogger(evidenceLogger)
   483  	return evidenceReactor, evidencePool, nil
   484  }
   485  
   486  func createBlockchainReactor(config *cfg.Config,
   487  	state sm.State,
   488  	blockExec *sm.BlockExecutor,
   489  	blockStore *store.BlockStore,
   490  	fastSync bool,
   491  	logger log.Logger,
   492  ) (bcReactor p2p.Reactor, err error) {
   493  	switch config.FastSync.Version {
   494  	case "v0":
   495  		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   496  	case "v1":
   497  		bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   498  	case "v2":
   499  		bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   500  	default:
   501  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
   502  	}
   503  
   504  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   505  	return bcReactor, nil
   506  }
   507  
   508  func createConsensusReactor(config *cfg.Config,
   509  	state sm.State,
   510  	blockExec *sm.BlockExecutor,
   511  	blockStore sm.BlockStore,
   512  	mempool mempl.Mempool,
   513  	evidencePool *evidence.Pool,
   514  	privValidator types.PrivValidator,
   515  	csMetrics *consensus.Metrics,
   516  	waitSync bool,
   517  	eventBus *types.EventBus,
   518  	consensusLogger log.Logger,
   519  	misbehaviors map[int64]cs.Misbehavior,
   520  ) (*cs.Reactor, *cs.State) {
   521  	consensusState := cs.NewState(
   522  		config.Consensus,
   523  		state.Copy(),
   524  		blockExec,
   525  		blockStore,
   526  		mempool,
   527  		evidencePool,
   528  		misbehaviors,
   529  		cs.StateMetrics(csMetrics),
   530  	)
   531  	consensusState.SetLogger(consensusLogger)
   532  	if privValidator != nil {
   533  		consensusState.SetPrivValidator(privValidator)
   534  	}
   535  	consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
   536  	consensusReactor.SetLogger(consensusLogger)
   537  	// services which will be publishing and/or subscribing for messages (events)
   538  	// consensusReactor will set it on consensusState and blockExecutor
   539  	consensusReactor.SetEventBus(eventBus)
   540  	return consensusReactor, consensusState
   541  }
   542  
   543  func createTransport(
   544  	config *cfg.Config,
   545  	nodeInfo p2p.NodeInfo,
   546  	nodeKey *p2p.NodeKey,
   547  	proxyApp proxy.AppConns,
   548  ) (
   549  	*p2p.MultiplexTransport,
   550  	[]p2p.PeerFilterFunc,
   551  ) {
   552  	var (
   553  		mConnConfig = p2p.MConnConfig(config.P2P)
   554  		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
   555  		connFilters = []p2p.ConnFilterFunc{}
   556  		peerFilters = []p2p.PeerFilterFunc{}
   557  	)
   558  
   559  	if !config.P2P.AllowDuplicateIP {
   560  		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
   561  	}
   562  
   563  	// Filter peers by addr or pubkey with an ABCI query.
   564  	// If the query return code is OK, add peer.
   565  	if config.FilterPeers {
   566  		connFilters = append(
   567  			connFilters,
   568  			// ABCI query for address filtering.
   569  			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
   570  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   571  					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
   572  				})
   573  				if err != nil {
   574  					return err
   575  				}
   576  				if res.IsErr() {
   577  					return fmt.Errorf("error querying abci app: %v", res)
   578  				}
   579  
   580  				return nil
   581  			},
   582  		)
   583  
   584  		peerFilters = append(
   585  			peerFilters,
   586  			// ABCI query for ID filtering.
   587  			func(_ p2p.IPeerSet, p p2p.Peer) error {
   588  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   589  					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
   590  				})
   591  				if err != nil {
   592  					return err
   593  				}
   594  				if res.IsErr() {
   595  					return fmt.Errorf("error querying abci app: %v", res)
   596  				}
   597  
   598  				return nil
   599  			},
   600  		)
   601  	}
   602  
   603  	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
   604  
   605  	// Limit the number of incoming connections.
   606  	max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   607  	p2p.MultiplexTransportMaxIncomingConnections(max)(transport)
   608  
   609  	return transport, peerFilters
   610  }
   611  
   612  func createSwitch(config *cfg.Config,
   613  	transport p2p.Transport,
   614  	p2pMetrics *p2p.Metrics,
   615  	peerFilters []p2p.PeerFilterFunc,
   616  	mempoolReactor p2p.Reactor,
   617  	bcReactor p2p.Reactor,
   618  	stateSyncReactor *statesync.Reactor,
   619  	consensusReactor *cs.Reactor,
   620  	evidenceReactor *evidence.Reactor,
   621  	nodeInfo p2p.NodeInfo,
   622  	nodeKey *p2p.NodeKey,
   623  	p2pLogger log.Logger,
   624  ) *p2p.Switch {
   625  	sw := p2p.NewSwitch(
   626  		config.P2P,
   627  		transport,
   628  		p2p.WithMetrics(p2pMetrics),
   629  		p2p.SwitchPeerFilters(peerFilters...),
   630  	)
   631  	sw.SetLogger(p2pLogger)
   632  	sw.AddReactor("MEMPOOL", mempoolReactor)
   633  	sw.AddReactor("BLOCKCHAIN", bcReactor)
   634  	sw.AddReactor("CONSENSUS", consensusReactor)
   635  	sw.AddReactor("EVIDENCE", evidenceReactor)
   636  	sw.AddReactor("STATESYNC", stateSyncReactor)
   637  
   638  	sw.SetNodeInfo(nodeInfo)
   639  	sw.SetNodeKey(nodeKey)
   640  
   641  	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
   642  	return sw
   643  }
   644  
   645  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   646  	p2pLogger log.Logger, nodeKey *p2p.NodeKey,
   647  ) (pex.AddrBook, error) {
   648  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   649  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   650  
   651  	// Add ourselves to addrbook to prevent dialing ourselves
   652  	if config.P2P.ExternalAddress != "" {
   653  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
   654  		if err != nil {
   655  			return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
   656  		}
   657  		addrBook.AddOurAddress(addr)
   658  	}
   659  	if config.P2P.ListenAddress != "" {
   660  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
   661  		if err != nil {
   662  			return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
   663  		}
   664  		addrBook.AddOurAddress(addr)
   665  	}
   666  
   667  	sw.SetAddrBook(addrBook)
   668  
   669  	return addrBook, nil
   670  }
   671  
   672  func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
   673  	sw *p2p.Switch, logger log.Logger,
   674  ) *pex.Reactor {
   675  	// TODO persistent peers ? so we can have their DNS addrs saved
   676  	pexReactor := pex.NewReactor(addrBook,
   677  		&pex.ReactorConfig{
   678  			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
   679  			SeedMode: config.P2P.SeedMode,
   680  			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
   681  			// blocks assuming 10s blocks ~ 28 hours.
   682  			// TODO (melekes): make it dynamic based on the actual block latencies
   683  			// from the live network.
   684  			// https://github.com/cometbft/cometbft/issues/3523
   685  			SeedDisconnectWaitPeriod:     28 * time.Hour,
   686  			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
   687  		})
   688  	pexReactor.SetLogger(logger.With("module", "pex"))
   689  	sw.AddReactor("PEX", pexReactor)
   690  	return pexReactor
   691  }
   692  
// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
//
// The function itself returns quickly; the sync runs in a background
// goroutine whose failures are only logged, never propagated to the caller.
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
	stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
	stateStore sm.Store, blockStore *store.BlockStore, state sm.State,
) error {
	ssR.Logger.Info("Starting state sync")

	// No injected provider (see the StateProvider option): build the default
	// light-client-backed provider from the configured RPC servers and trust
	// options, bounded by a 10s construction timeout.
	if stateProvider == nil {
		var err error
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		stateProvider, err = statesync.NewLightClientStateProvider(
			ctx,
			state.ChainID, state.Version, state.InitialHeight,
			config.RPCServers, light.TrustOptions{
				Period: config.TrustPeriod,
				Height: config.TrustHeight,
				Hash:   config.TrustHashBytes(),
			}, ssR.Logger.With("module", "light"))
		if err != nil {
			return fmt.Errorf("failed to set up light client state provider: %w", err)
		}
	}

	go func() {
		state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime)
		if err != nil {
			ssR.Logger.Error("State sync failed", "err", err)
			return
		}
		// Persist the synced state first, then the seen commit, so a restart
		// resumes from the snapshot height.
		err = stateStore.Bootstrap(state)
		if err != nil {
			ssR.Logger.Error("Failed to bootstrap node with new state", "err", err)
			return
		}
		err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
		if err != nil {
			ssR.Logger.Error("Failed to store last seen commit", "err", err)
			return
		}

		if fastSync {
			// FIXME Very ugly to have these metrics bleed through here.
			conR.Metrics.StateSyncing.Set(0)
			conR.Metrics.FastSyncing.Set(1)
			err = bcR.SwitchToFastSync(state)
			if err != nil {
				ssR.Logger.Error("Failed to switch to fast sync", "err", err)
				return
			}
		} else {
			// Fast sync disabled: hand control directly to consensus.
			conR.SwitchToConsensus(state, true)
		}
	}()
	return nil
}
   749  
// NewNode returns a new, ready to go, CometBFT Node.
//
// This maverick variant additionally takes a map of height -> misbehavior,
// which is handed to the consensus reactor so the node deliberately
// misbehaves at those heights (used for Byzantine-fault testing).
func NewNode(config *cfg.Config,
	privValidator types.PrivValidator,
	nodeKey *p2p.NodeKey,
	clientCreator proxy.ClientCreator,
	genesisDocProvider GenesisDocProvider,
	dbProvider DBProvider,
	metricsProvider MetricsProvider,
	logger log.Logger,
	misbehaviors map[int64]cs.Misbehavior,
	options ...Option,
) (*Node, error) {
	// Open (or create) the block store and state databases.
	blockStore, stateDB, err := initDBs(config, dbProvider)
	if err != nil {
		return nil, err
	}

	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: false,
	})

	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
	if err != nil {
		return nil, err
	}

	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
	if err != nil {
		return nil, err
	}

	// EventBus and IndexerService must be started before the handshake because
	// we might need to index the txs of the replayed block as this might not have happened
	// when the node stopped last time (i.e. the node stopped after it saved the block
	// but before it indexed the txs, or, endblocker panicked)
	eventBus, err := createAndStartEventBus(logger)
	if err != nil {
		return nil, err
	}

	indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger)
	if err != nil {
		return nil, err
	}

	// If an address is provided, listen on the socket for a connection from an
	// external signing process.
	if config.PrivValidatorListenAddr != "" {
		// FIXME: we should start services inside OnStart
		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
		if err != nil {
			return nil, fmt.Errorf("error with private validator socket client: %w", err)
		}
	}

	pubKey, err := privValidator.GetPubKey()
	if err != nil {
		return nil, fmt.Errorf("can't get pubkey: %w", err)
	}

	// Determine whether we should do state and/or fast sync.
	// We don't fast-sync when the only validator is us.
	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
	if stateSync && state.LastBlockHeight > 0 {
		logger.Info("Found local state with non-zero height, skipping state sync")
		stateSync = false
	}

	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
	// and replays any blocks as necessary to sync CometBFT with the app.
	consensusLogger := logger.With("module", "consensus")
	if !stateSync {
		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
			return nil, err
		}

		// Reload the state. It will have the Version.Consensus.App set by the
		// Handshake, and may have other modifications as well (ie. depending on
		// what happened during block replay).
		state, err = stateStore.Load()
		if err != nil {
			return nil, fmt.Errorf("cannot load state: %w", err)
		}
	}

	logNodeStartupInfo(state, pubKey, logger, consensusLogger)

	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)

	// Make MempoolReactor
	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)

	// Make Evidence Reactor
	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
	if err != nil {
		return nil, err
	}

	// make block executor for consensus and blockchain reactors to execute blocks
	blockExec := sm.NewBlockExecutor(
		stateStore,
		logger.With("module", "state"),
		proxyApp.Consensus(),
		mempool,
		evidencePool,
		sm.BlockExecutorWithMetrics(smMetrics),
	)

	// Make BlockchainReactor. Don't start fast sync if we're doing a state sync first.
	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger)
	if err != nil {
		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
	}

	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first.
	// FIXME We need to update metrics here, since other reactors don't have access to them.
	if stateSync {
		csMetrics.StateSyncing.Set(1)
	} else if fastSync {
		csMetrics.FastSyncing.Set(1)
	}

	logger.Info("Setting up maverick consensus reactor", "Misbehaviors", misbehaviors)
	consensusReactor, consensusState := createConsensusReactor(
		config, state, blockExec, blockStore, mempool, evidencePool,
		privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, misbehaviors)

	// Set up state sync reactor, and schedule a sync if requested.
	// FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy,
	// we should clean this whole thing up. See:
	// https://github.com/cometbft/cometbft/issues/4644
	stateSyncReactor := statesync.NewReactor(
		*config.StateSync,
		proxyApp.Snapshot(),
		proxyApp.Query(),
		config.StateSync.TempDir,
	)
	stateSyncReactor.SetLogger(logger.With("module", "statesync"))

	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
	if err != nil {
		return nil, err
	}

	// Setup Transport.
	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)

	// Setup Switch.
	p2pLogger := logger.With("module", "p2p")
	sw := createSwitch(
		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
		stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
	)

	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
	}

	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
	if err != nil {
		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
	}

	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
	if err != nil {
		return nil, fmt.Errorf("could not create addrbook: %w", err)
	}

	// Optionally, start the pex reactor
	//
	// TODO:
	//
	// We need to set Seeds and PersistentPeers on the switch,
	// since it needs to be able to use these (and their DNS names)
	// even if the PEX is off. We can include the DNS name in the NetAddress,
	// but it would still be nice to have a clear list of the current "PersistentPeers"
	// somewhere that we can return with net_info.
	//
	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
	// Note we currently use the addrBook regardless at least for AddOurAddress
	var pexReactor *pex.Reactor
	if config.P2P.PexReactor {
		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
	}

	// Optionally expose pprof for debugging; errors only get logged since the
	// profiler is not essential to node operation.
	if config.RPC.PprofListenAddress != "" {
		go func() {
			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
			//nolint:gosec,nolintlint // G114: Use of net/http serve function that has no support for setting timeouts
			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
		}()
	}

	node := &Node{
		config:        config,
		genesisDoc:    genDoc,
		privValidator: privValidator,

		transport: transport,
		sw:        sw,
		addrBook:  addrBook,
		nodeInfo:  nodeInfo,
		nodeKey:   nodeKey,

		stateStore:       stateStore,
		blockStore:       blockStore,
		bcReactor:        bcReactor,
		mempoolReactor:   mempoolReactor,
		mempool:          mempool,
		consensusState:   consensusState,
		consensusReactor: consensusReactor,
		stateSyncReactor: stateSyncReactor,
		stateSync:        stateSync,
		stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
		pexReactor:       pexReactor,
		evidencePool:     evidencePool,
		proxyApp:         proxyApp,
		txIndexer:        txIndexer,
		indexerService:   indexerService,
		blockIndexer:     blockIndexer,
		eventBus:         eventBus,
	}
	node.BaseService = *service.NewBaseService(logger, "Node", node)

	// Apply any caller-supplied options to the fully constructed node.
	for _, option := range options {
		option(node)
	}

	return node, nil
}
   983  
// OnStart starts the Node. It implements service.Service.
//
// Startup order matters: RPC is started before P2P (so txs for the first
// block can be received), the transport before the switch, and state sync
// last, once peers can be dialed.
func (n *Node) OnStart() error {
	// If genesis time lies in the future, block until then.
	now := cmttime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		time.Sleep(genTime.Sub(now))
	}

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		n.rpcListeners = listeners
	}

	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport.
	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
	}

	// Run state sync
	if n.stateSync {
		// The blockchain reactor must support taking over after the state
		// snapshot has been restored (i.e. implement fastSyncReactor).
		bcR, ok := n.bcReactor.(fastSyncReactor)
		if !ok {
			return fmt.Errorf("this blockchain reactor does not support switching from state sync")
		}
		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
			n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
		if err != nil {
			return fmt.Errorf("failed to start state sync: %w", err)
		}
	}

	return nil
}
  1049  
// OnStop stops the Node. It implements service.Service.
//
// Shutdown proceeds in rough reverse order of startup: non-reactor services,
// then reactors (the switch), then the transport, and finally the RPC
// listeners, private validator and Prometheus server. Errors are logged
// rather than returned so every component gets a chance to stop.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	if err := n.eventBus.Stop(); err != nil {
		n.Logger.Error("Error closing eventBus", "err", err)
	}
	if err := n.indexerService.Stop(); err != nil {
		n.Logger.Error("Error closing indexerService", "err", err)
	}

	// now stop the reactors
	if err := n.sw.Stop(); err != nil {
		n.Logger.Error("Error closing switch", "err", err)
	}

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	// If the private validator is itself a service (e.g. a socket client
	// created in NewNode), stop it too.
	if pvsc, ok := n.privValidator.(service.Service); ok {
		if err := pvsc.Stop(); err != nil {
			n.Logger.Error("Error closing private validator", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}
  1096  
// ConfigureRPC makes sure RPC has all the objects it needs to operate.
// It installs the node's stores, pools, switch and pubkey into the
// package-level rpccore environment that the RPC handlers read from.
func (n *Node) ConfigureRPC() error {
	pubKey, err := n.privValidator.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	}
	rpccore.SetEnvironment(&rpccore.Environment{
		ProxyAppQuery:   n.proxyApp.Query(),
		ProxyAppMempool: n.proxyApp.Mempool(),

		StateStore:     n.stateStore,
		BlockStore:     n.blockStore,
		EvidencePool:   n.evidencePool,
		ConsensusState: n.consensusState,
		P2PPeers:       n.sw,
		// The Node itself serves as the RPC transport introspection object.
		P2PTransport: n,

		PubKey:       pubKey,
		GenDoc:       n.genesisDoc,
		TxIndexer:    n.txIndexer,
		BlockIndexer: n.blockIndexer,
		// NOTE(review): an empty upstream consensus.Reactor is installed here
		// instead of the maverick reactor — presumably because the maverick
		// reactor is not the type rpccore expects; confirm before relying on
		// consensus-reactor-backed RPC endpoints.
		ConsensusReactor: &consensus.Reactor{},
		EventBus:         n.eventBus,
		Mempool:          n.mempool,

		Logger: n.Logger.With("module", "rpc"),

		Config: *n.config.RPC,
	})
	return nil
}
  1128  
// startRPC configures the rpccore environment and starts an HTTP RPC server
// (with websocket support, optional CORS and TLS) on every configured listen
// address, plus an optional gRPC server. It returns the open listeners so
// OnStop can close them.
func (n *Node) startRPC() ([]net.Listener, error) {
	err := n.ConfigureRPC()
	if err != nil {
		return nil, err
	}

	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")

	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	config := rpcserver.DefaultConfig()
	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
	// If necessary adjust global WriteTimeout to ensure it's greater than
	// TimeoutBroadcastTxCommit.
	// See https://github.com/cometbft/cometbft/issues/3435
	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wmLogger := rpcLogger.With("protocol", "websocket")
		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
			// Drop all of a client's event subscriptions when its websocket
			// disconnects; ErrSubscriptionNotFound just means it had none.
			rpcserver.OnDisconnect(func(remoteAddr string) {
				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
				if err != nil && err != cmtpubsub.ErrSubscriptionNotFound {
					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
				}
			}),
			rpcserver.ReadLimit(config.MaxBodyBytes),
		)
		wm.SetLogger(wmLogger)
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
		listener, err := rpcserver.Listen(
			listenAddr,
			config,
		)
		if err != nil {
			return nil, err
		}

		// Wrap the mux in CORS middleware when configured.
		var rootHandler http.Handler = mux
		if n.config.RPC.IsCorsEnabled() {
			corsMiddleware := cors.New(cors.Options{
				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
				AllowedMethods: n.config.RPC.CORSAllowedMethods,
				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
			})
			rootHandler = corsMiddleware.Handler(mux)
		}
		// Each listener is served on its own goroutine; serve errors are
		// logged rather than returned.
		if n.config.RPC.IsTLSEnabled() {
			go func() {
				if err := rpcserver.ServeTLS(
					listener,
					rootHandler,
					n.config.RPC.CertFile(),
					n.config.RPC.KeyFile(),
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server with TLS", "err", err)
				}
			}()
		} else {
			go func() {
				if err := rpcserver.Serve(
					listener,
					rootHandler,
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server", "err", err)
				}
			}()
		}

		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		config := rpcserver.DefaultConfig()
		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
		// If necessary adjust global WriteTimeout to ensure it's greater than
		// TimeoutBroadcastTxCommit.
		// See https://github.com/cometbft/cometbft/issues/3435
		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
		}
		listener, err := rpcserver.Listen(grpcListenAddr, config)
		if err != nil {
			return nil, err
		}
		go func() {
			if err := grpccore.StartGRPCServer(listener); err != nil {
				n.Logger.Error("Error starting gRPC server", "err", err)
			}
		}()
		listeners = append(listeners, listener)
	}

	return listeners, nil
}
  1244  
  1245  // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
  1246  // collectors on addr.
  1247  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1248  	srv := &http.Server{
  1249  		Addr: addr,
  1250  		Handler: promhttp.InstrumentMetricHandler(
  1251  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1252  				prometheus.DefaultGatherer,
  1253  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1254  			),
  1255  		),
  1256  		ReadHeaderTimeout: readHeaderTimeout,
  1257  	}
  1258  	go func() {
  1259  		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
  1260  			// Error starting or closing listener:
  1261  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1262  		}
  1263  	}()
  1264  	return srv
  1265  }
  1266  
// Switch returns the Node's P2P Switch.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}
  1271  
// BlockStore returns the Node's BlockStore.
func (n *Node) BlockStore() *store.BlockStore {
	return n.blockStore
}
  1276  
// ConsensusState returns the Node's (maverick) ConsensusState.
func (n *Node) ConsensusState() *cs.State {
	return n.consensusState
}
  1281  
// ConsensusReactor returns the Node's (maverick) ConsensusReactor.
func (n *Node) ConsensusReactor() *cs.Reactor {
	return n.consensusReactor
}
  1286  
// MempoolReactor returns the Node's mempool reactor.
func (n *Node) MempoolReactor() p2p.Reactor {
	return n.mempoolReactor
}
  1291  
// Mempool returns the Node's mempool.
func (n *Node) Mempool() mempl.Mempool {
	return n.mempool
}
  1296  
// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
func (n *Node) PEXReactor() *pex.Reactor {
	return n.pexReactor
}
  1301  
// EvidencePool returns the Node's EvidencePool.
func (n *Node) EvidencePool() *evidence.Pool {
	return n.evidencePool
}
  1306  
// EventBus returns the Node's EventBus.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}
  1311  
// PrivValidator returns the Node's PrivValidator.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}
  1317  
// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}
  1322  
// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}
  1327  
// Config returns the Node's config.
func (n *Node) Config() *cfg.Config {
	return n.config
}
  1332  
  1333  //------------------------------------------------------------------------------
  1334  
// Listeners returns a one-element, human-readable description of the node's
// external P2P address (the Node acts as the RPC P2PTransport; see ConfigureRPC).
func (n *Node) Listeners() []string {
	return []string{
		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
	}
}
  1340  
// IsListening reports whether the P2P transport is currently listening
// (set in OnStart, cleared in OnStop).
func (n *Node) IsListening() bool {
	return n.isListening
}
  1344  
// NodeInfo returns the Node's Info from the Switch.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}
  1349  
  1350  func makeNodeInfo(
  1351  	config *cfg.Config,
  1352  	nodeKey *p2p.NodeKey,
  1353  	txIndexer txindex.TxIndexer,
  1354  	genDoc *types.GenesisDoc,
  1355  	state sm.State,
  1356  ) (p2p.NodeInfo, error) {
  1357  	txIndexerStatus := "on"
  1358  	if _, ok := txIndexer.(*null.TxIndex); ok {
  1359  		txIndexerStatus = "off"
  1360  	}
  1361  
  1362  	var bcChannel byte
  1363  	switch config.FastSync.Version {
  1364  	case "v0":
  1365  		bcChannel = bcv0.BlockchainChannel
  1366  	case "v1":
  1367  		bcChannel = bcv1.BlockchainChannel
  1368  	case "v2":
  1369  		bcChannel = bcv2.BlockchainChannel
  1370  	default:
  1371  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
  1372  	}
  1373  
  1374  	nodeInfo := p2p.DefaultNodeInfo{
  1375  		ProtocolVersion: p2p.NewProtocolVersion(
  1376  			version.P2PProtocol, // global
  1377  			state.Version.Consensus.Block,
  1378  			state.Version.Consensus.App,
  1379  		),
  1380  		DefaultNodeID: nodeKey.ID(),
  1381  		Network:       genDoc.ChainID,
  1382  		Version:       version.TMCoreSemVer,
  1383  		Channels: []byte{
  1384  			bcChannel,
  1385  			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
  1386  			mempl.MempoolChannel,
  1387  			evidence.EvidenceChannel,
  1388  			statesync.SnapshotChannel, statesync.ChunkChannel,
  1389  		},
  1390  		Moniker: config.Moniker,
  1391  		Other: p2p.DefaultNodeInfoOther{
  1392  			TxIndex:    txIndexerStatus,
  1393  			RPCAddress: config.RPC.ListenAddress,
  1394  		},
  1395  	}
  1396  
  1397  	if config.P2P.PexReactor {
  1398  		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
  1399  	}
  1400  
  1401  	lAddr := config.P2P.ExternalAddress
  1402  
  1403  	if lAddr == "" {
  1404  		lAddr = config.P2P.ListenAddress
  1405  	}
  1406  
  1407  	nodeInfo.ListenAddr = lAddr
  1408  
  1409  	err := nodeInfo.Validate()
  1410  	return nodeInfo, err
  1411  }
  1412  
  1413  //------------------------------------------------------------------------------
  1414  
// genesisDocKey is the database key under which the genesis document is
// persisted (see saveGenesisDoc / loadGenesisDoc).
var genesisDocKey = []byte("genesisDoc")
  1416  
  1417  // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
  1418  // database, or creates one using the given genesisDocProvider and persists the
  1419  // result to the database. On success this also returns the genesis doc loaded
  1420  // through the given provider.
  1421  func LoadStateFromDBOrGenesisDocProvider(
  1422  	stateDB dbm.DB,
  1423  	genesisDocProvider GenesisDocProvider,
  1424  ) (sm.State, *types.GenesisDoc, error) {
  1425  	// Get genesis doc
  1426  	genDoc, err := loadGenesisDoc(stateDB)
  1427  	if err != nil {
  1428  		genDoc, err = genesisDocProvider()
  1429  		if err != nil {
  1430  			return sm.State{}, nil, err
  1431  		}
  1432  		// save genesis doc to prevent a certain class of user errors (e.g. when it
  1433  		// was changed, accidentally or not). Also good for audit trail.
  1434  		saveGenesisDoc(stateDB, genDoc)
  1435  	}
  1436  	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
  1437  		DiscardABCIResponses: false,
  1438  	})
  1439  	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
  1440  	if err != nil {
  1441  		return sm.State{}, nil, err
  1442  	}
  1443  	return state, genDoc, nil
  1444  }
  1445  
  1446  // panics if failed to unmarshal bytes
  1447  func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
  1448  	b, err := db.Get(genesisDocKey)
  1449  	if err != nil {
  1450  		panic(err)
  1451  	}
  1452  	if len(b) == 0 {
  1453  		return nil, errors.New("genesis doc not found")
  1454  	}
  1455  	var genDoc *types.GenesisDoc
  1456  	err = cmtjson.Unmarshal(b, &genDoc)
  1457  	if err != nil {
  1458  		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
  1459  	}
  1460  	return genDoc, nil
  1461  }
  1462  
  1463  // panics if failed to marshal the given genesis document
  1464  func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
  1465  	b, err := cmtjson.Marshal(genDoc)
  1466  	if err != nil {
  1467  		panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
  1468  	}
  1469  	if err := db.SetSync(genesisDocKey, b); err != nil {
  1470  		panic(fmt.Sprintf("Failed to save genesis doc: %v", err))
  1471  	}
  1472  }
  1473  
  1474  func createAndStartPrivValidatorSocketClient(
  1475  	listenAddr,
  1476  	chainID string,
  1477  	logger log.Logger,
  1478  ) (types.PrivValidator, error) {
  1479  	pve, err := privval.NewSignerListener(listenAddr, logger)
  1480  	if err != nil {
  1481  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1482  	}
  1483  
  1484  	pvsc, err := privval.NewSignerClient(pve, chainID)
  1485  	if err != nil {
  1486  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1487  	}
  1488  
  1489  	// try to get a pubkey from private validate first time
  1490  	_, err = pvsc.GetPubKey()
  1491  	if err != nil {
  1492  		return nil, fmt.Errorf("can't get pubkey: %w", err)
  1493  	}
  1494  
  1495  	const (
  1496  		retries = 50 // 50 * 100ms = 5s total
  1497  		timeout = 100 * time.Millisecond
  1498  	)
  1499  	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
  1500  
  1501  	return pvscWithRetries, nil
  1502  }
  1503  
  1504  // splitAndTrimEmpty slices s into all subslices separated by sep and returns a
  1505  // slice of the string s with all leading and trailing Unicode code points
  1506  // contained in cutset removed. If sep is empty, SplitAndTrim splits after each
  1507  // UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
  1508  // -1.  also filter out empty strings, only return non-empty strings.
  1509  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1510  	if s == "" {
  1511  		return []string{}
  1512  	}
  1513  
  1514  	spl := strings.Split(s, sep)
  1515  	nonEmptyStrings := make([]string, 0, len(spl))
  1516  	for i := 0; i < len(spl); i++ {
  1517  		element := strings.Trim(spl[i], cutset)
  1518  		if element != "" {
  1519  			nonEmptyStrings = append(nonEmptyStrings, element)
  1520  		}
  1521  	}
  1522  	return nonEmptyStrings
  1523  }