github.com/vipernet-xyz/tm@v0.34.24/node/node.go (about)

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"net"
     9  	"net/http"
    10  	"strings"
    11  	"time"
    12  
    13  	"github.com/prometheus/client_golang/prometheus"
    14  	"github.com/prometheus/client_golang/prometheus/promhttp"
    15  	"github.com/rs/cors"
    16  	dbm "github.com/tendermint/tm-db"
    17  
    18  	abci "github.com/vipernet-xyz/tm/abci/types"
    19  	bcv0 "github.com/vipernet-xyz/tm/blockchain/v0"
    20  	bcv1 "github.com/vipernet-xyz/tm/blockchain/v1"
    21  	bcv2 "github.com/vipernet-xyz/tm/blockchain/v2"
    22  	cfg "github.com/vipernet-xyz/tm/config"
    23  	cs "github.com/vipernet-xyz/tm/consensus"
    24  	"github.com/vipernet-xyz/tm/crypto"
    25  	"github.com/vipernet-xyz/tm/evidence"
    26  
    27  	tmjson "github.com/vipernet-xyz/tm/libs/json"
    28  	"github.com/vipernet-xyz/tm/libs/log"
    29  	tmpubsub "github.com/vipernet-xyz/tm/libs/pubsub"
    30  	"github.com/vipernet-xyz/tm/libs/service"
    31  	"github.com/vipernet-xyz/tm/light"
    32  	mempl "github.com/vipernet-xyz/tm/mempool"
    33  	mempoolv0 "github.com/vipernet-xyz/tm/mempool/v0"
    34  	mempoolv1 "github.com/vipernet-xyz/tm/mempool/v1"
    35  	"github.com/vipernet-xyz/tm/p2p"
    36  	"github.com/vipernet-xyz/tm/p2p/pex"
    37  	"github.com/vipernet-xyz/tm/privval"
    38  	"github.com/vipernet-xyz/tm/proxy"
    39  	rpccore "github.com/vipernet-xyz/tm/rpc/core"
    40  	grpccore "github.com/vipernet-xyz/tm/rpc/grpc"
    41  	rpcserver "github.com/vipernet-xyz/tm/rpc/jsonrpc/server"
    42  	sm "github.com/vipernet-xyz/tm/state"
    43  	"github.com/vipernet-xyz/tm/state/indexer"
    44  	blockidxkv "github.com/vipernet-xyz/tm/state/indexer/block/kv"
    45  	blockidxnull "github.com/vipernet-xyz/tm/state/indexer/block/null"
    46  	"github.com/vipernet-xyz/tm/state/indexer/sink/psql"
    47  	"github.com/vipernet-xyz/tm/state/txindex"
    48  	"github.com/vipernet-xyz/tm/state/txindex/kv"
    49  	"github.com/vipernet-xyz/tm/state/txindex/null"
    50  	"github.com/vipernet-xyz/tm/statesync"
    51  	"github.com/vipernet-xyz/tm/store"
    52  	"github.com/vipernet-xyz/tm/types"
    53  	tmtime "github.com/vipernet-xyz/tm/types/time"
    54  	"github.com/vipernet-xyz/tm/version"
    55  
    56  	_ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port
    57  
    58  	_ "github.com/lib/pq" // provide the psql db driver
    59  )
    60  
//------------------------------------------------------------------------------

// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string      // database name, e.g. "blockstore", "state", "tx_index", "evidence"
	Config *cfg.Config // node config; supplies the backend type and data directory
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

// readHeaderTimeout bounds how long an HTTP server waits for request headers.
// NOTE(review): presumably applied to the RPC/metrics servers started
// elsewhere in this file — confirm at the usage sites.
const readHeaderTimeout = 10 * time.Second
    73  
    74  // DefaultDBProvider returns a database using the DBBackend and DBDir
    75  // specified in the ctx.Config.
    76  func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
    77  	dbType := dbm.BackendType(ctx.Config.DBBackend)
    78  	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
    79  }
    80  
// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)

// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
	return func() (*types.GenesisDoc, error) {
		// The genesis file is re-read on every invocation of the provider.
		return types.GenesisDocFromFile(config.GenesisFile())
	}
}

// Provider takes a config and a logger and returns a ready to go Node.
type Provider func(*cfg.Config, log.Logger) (*Node, error)
    96  
    97  // DefaultNewNode returns a Tendermint node with default settings for the
    98  // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
    99  // It implements NodeProvider.
   100  func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
   101  	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
   102  	if err != nil {
   103  		return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
   104  	}
   105  
   106  	return NewNode(config,
   107  		privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
   108  		nodeKey,
   109  		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
   110  		DefaultGenesisDocProviderFunc(config),
   111  		DefaultDBProvider,
   112  		DefaultMetricsProvider(config.Instrumentation),
   113  		logger,
   114  	)
   115  }
   116  
   117  // MetricsProvider returns a consensus, p2p and mempool Metrics.
   118  type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
   119  
   120  // DefaultMetricsProvider returns Metrics build using Prometheus client library
   121  // if Prometheus is enabled. Otherwise, it returns no-op Metrics.
   122  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   123  	return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
   124  		if config.Prometheus {
   125  			return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   126  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   127  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   128  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   129  		}
   130  		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
   131  	}
   132  }
   133  
// Option sets a parameter for the node.
type Option func(*Node)

// fastSyncReactor is a temporary interface for switching a blockchain reactor
// over to fast sync once state sync completes; we should get rid of the v0
// and v1 reactors.
// See: https://github.com/vipernet-xyz/tm/issues/4595
type fastSyncReactor interface {
	SwitchToFastSync(sm.State) error
}
   142  
// CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
// the node's Switch.
//
// WARNING: using any name from the below list of the existing reactors will
// result in replacing it with the custom one.
//
//   - MEMPOOL
//   - BLOCKCHAIN
//   - CONSENSUS
//   - EVIDENCE
//   - PEX
//   - STATESYNC
func CustomReactors(reactors map[string]p2p.Reactor) Option {
	return func(n *Node) {
		for name, reactor := range reactors {
			// Replace an existing reactor of the same name, if any.
			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
				n.sw.Logger.Info("Replacing existing reactor with a custom one",
					"name", name, "existing", existingReactor, "custom", reactor)
				n.sw.RemoveReactor(name, existingReactor)
			}
			n.sw.AddReactor(name, reactor)
			// register the new channels to the nodeInfo
			// NOTE: This is a bit messy now with the type casting but is
			// cleaned up in the following version when NodeInfo is changed from
			// and interface to a concrete type
			if ni, ok := n.nodeInfo.(p2p.DefaultNodeInfo); ok {
				for _, chDesc := range reactor.GetChannels() {
					if !ni.HasChannel(chDesc.ID) {
						ni.Channels = append(ni.Channels, chDesc.ID)
						n.transport.AddChannel(chDesc.ID)
					}
				}
				// ni is a value copy of the interface contents; write the
				// mutated copy back so the node sees the new channels.
				n.nodeInfo = ni
			} else {
				n.Logger.Error("Node info is not of type DefaultNodeInfo. Custom reactor channels can not be added.")
			}
		}
	}
}
   182  
   183  // StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and
   184  // build a State object for bootstrapping the node.
   185  // WARNING: this interface is considered unstable and subject to change.
   186  func StateProvider(stateProvider statesync.StateProvider) Option {
   187  	return func(n *Node) {
   188  		n.stateSyncProvider = stateProvider
   189  	}
   190  }
   191  
   192  //------------------------------------------------------------------------------
   193  
// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	service.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo // our node's metadata advertised to peers
	nodeKey     *p2p.NodeKey // our node privkey
	isListening bool         // whether the transport is currently listening

	// services
	eventBus          *types.EventBus   // pub/sub for services
	stateStore        sm.Store          // durable consensus state store
	blockStore        *store.BlockStore // store the blockchain to disk
	bcReactor         p2p.Reactor       // for fast-syncing
	mempoolReactor    p2p.Reactor       // for gossipping transactions
	mempool           mempl.Mempool     // pending transactions
	stateSync         bool                    // whether the node should state sync on startup
	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
	stateSyncGenesis  sm.State                // provides the genesis state for state sync
	consensusState    *cs.State               // latest consensus state
	consensusReactor  *cs.Reactor             // for participating in the consensus
	pexReactor        *pex.Reactor            // for exchanging peer addresses
	evidencePool      *evidence.Pool          // tracking evidence
	proxyApp          proxy.AppConns          // connection to the application
	rpcListeners      []net.Listener          // rpc servers
	txIndexer         txindex.TxIndexer       // indexes transactions (kv, psql, or null)
	blockIndexer      indexer.BlockIndexer    // indexes block events (kv, psql, or null)
	indexerService    *txindex.IndexerService // feeds the indexers from the event bus
	prometheusSrv     *http.Server            // Prometheus metrics HTTP server, if enabled
}
   234  
   235  func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
   236  	var blockStoreDB dbm.DB
   237  	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
   238  	if err != nil {
   239  		return
   240  	}
   241  	blockStore = store.NewBlockStore(blockStoreDB)
   242  
   243  	stateDB, err = dbProvider(&DBContext{"state", config})
   244  	if err != nil {
   245  		return
   246  	}
   247  
   248  	return
   249  }
   250  
   251  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
   252  	proxyApp := proxy.NewAppConns(clientCreator)
   253  	proxyApp.SetLogger(logger.With("module", "proxy"))
   254  	if err := proxyApp.Start(); err != nil {
   255  		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
   256  	}
   257  	return proxyApp, nil
   258  }
   259  
   260  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   261  	eventBus := types.NewEventBus()
   262  	eventBus.SetLogger(logger.With("module", "events"))
   263  	if err := eventBus.Start(); err != nil {
   264  		return nil, err
   265  	}
   266  	return eventBus, nil
   267  }
   268  
   269  func createAndStartIndexerService(
   270  	config *cfg.Config,
   271  	chainID string,
   272  	dbProvider DBProvider,
   273  	eventBus *types.EventBus,
   274  	logger log.Logger,
   275  ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
   276  	var (
   277  		txIndexer    txindex.TxIndexer
   278  		blockIndexer indexer.BlockIndexer
   279  	)
   280  
   281  	switch config.TxIndex.Indexer {
   282  	case "kv":
   283  		store, err := dbProvider(&DBContext{"tx_index", config})
   284  		if err != nil {
   285  			return nil, nil, nil, err
   286  		}
   287  
   288  		txIndexer = kv.NewTxIndex(store)
   289  		blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
   290  
   291  	case "psql":
   292  		if config.TxIndex.PsqlConn == "" {
   293  			return nil, nil, nil, errors.New(`no psql-conn is set for the "psql" indexer`)
   294  		}
   295  		es, err := psql.NewEventSink(config.TxIndex.PsqlConn, chainID)
   296  		if err != nil {
   297  			return nil, nil, nil, fmt.Errorf("creating psql indexer: %w", err)
   298  		}
   299  		txIndexer = es.TxIndexer()
   300  		blockIndexer = es.BlockIndexer()
   301  
   302  	default:
   303  		txIndexer = &null.TxIndex{}
   304  		blockIndexer = &blockidxnull.BlockerIndexer{}
   305  	}
   306  
   307  	indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false)
   308  	indexerService.SetLogger(logger.With("module", "txindex"))
   309  
   310  	if err := indexerService.Start(); err != nil {
   311  		return nil, nil, nil, err
   312  	}
   313  
   314  	return indexerService, txIndexer, blockIndexer, nil
   315  }
   316  
   317  func doHandshake(
   318  	stateStore sm.Store,
   319  	state sm.State,
   320  	blockStore sm.BlockStore,
   321  	genDoc *types.GenesisDoc,
   322  	eventBus types.BlockEventPublisher,
   323  	proxyApp proxy.AppConns,
   324  	consensusLogger log.Logger,
   325  ) error {
   326  	handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
   327  	handshaker.SetLogger(consensusLogger)
   328  	handshaker.SetEventBus(eventBus)
   329  	if err := handshaker.Handshake(proxyApp); err != nil {
   330  		return fmt.Errorf("error during handshake: %v", err)
   331  	}
   332  	return nil
   333  }
   334  
   335  func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
   336  	// Log the version info.
   337  	logger.Info("Version info",
   338  		"tendermint_version", version.TMCoreSemVer,
   339  		"block", version.BlockProtocol,
   340  		"p2p", version.P2PProtocol,
   341  	)
   342  
   343  	// If the state and software differ in block version, at least log it.
   344  	if state.Version.Consensus.Block != version.BlockProtocol {
   345  		logger.Info("Software and state have different block protocols",
   346  			"software", version.BlockProtocol,
   347  			"state", state.Version.Consensus.Block,
   348  		)
   349  	}
   350  
   351  	addr := pubKey.Address()
   352  	// Log whether this node is a validator or an observer
   353  	if state.Validators.HasAddress(addr) {
   354  		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   355  	} else {
   356  		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   357  	}
   358  }
   359  
   360  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   361  	if state.Validators.Size() > 1 {
   362  		return false
   363  	}
   364  	addr, _ := state.Validators.GetByIndex(0)
   365  	return bytes.Equal(pubKey.Address(), addr)
   366  }
   367  
   368  func createMempoolAndMempoolReactor(
   369  	config *cfg.Config,
   370  	proxyApp proxy.AppConns,
   371  	state sm.State,
   372  	memplMetrics *mempl.Metrics,
   373  	logger log.Logger,
   374  ) (mempl.Mempool, p2p.Reactor) {
   375  	switch config.Mempool.Version {
   376  	case cfg.MempoolV1:
   377  		mp := mempoolv1.NewTxMempool(
   378  			logger,
   379  			config.Mempool,
   380  			proxyApp.Mempool(),
   381  			state.LastBlockHeight,
   382  			mempoolv1.WithMetrics(memplMetrics),
   383  			mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
   384  			mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
   385  		)
   386  
   387  		reactor := mempoolv1.NewReactor(
   388  			config.Mempool,
   389  			mp,
   390  		)
   391  		if config.Consensus.WaitForTxs() {
   392  			mp.EnableTxsAvailable()
   393  		}
   394  
   395  		return mp, reactor
   396  
   397  	case cfg.MempoolV0:
   398  		mp := mempoolv0.NewCListMempool(
   399  			config.Mempool,
   400  			proxyApp.Mempool(),
   401  			state.LastBlockHeight,
   402  			mempoolv0.WithMetrics(memplMetrics),
   403  			mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
   404  			mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
   405  		)
   406  
   407  		mp.SetLogger(logger)
   408  
   409  		reactor := mempoolv0.NewReactor(
   410  			config.Mempool,
   411  			mp,
   412  		)
   413  		if config.Consensus.WaitForTxs() {
   414  			mp.EnableTxsAvailable()
   415  		}
   416  
   417  		return mp, reactor
   418  
   419  	default:
   420  		return nil, nil
   421  	}
   422  }
   423  
   424  func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   425  	stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger,
   426  ) (*evidence.Reactor, *evidence.Pool, error) {
   427  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   428  	if err != nil {
   429  		return nil, nil, err
   430  	}
   431  	evidenceLogger := logger.With("module", "evidence")
   432  	evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB, sm.StoreOptions{
   433  		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
   434  	}), blockStore)
   435  	if err != nil {
   436  		return nil, nil, err
   437  	}
   438  	evidenceReactor := evidence.NewReactor(evidencePool)
   439  	evidenceReactor.SetLogger(evidenceLogger)
   440  	return evidenceReactor, evidencePool, nil
   441  }
   442  
   443  func createBlockchainReactor(config *cfg.Config,
   444  	state sm.State,
   445  	blockExec *sm.BlockExecutor,
   446  	blockStore *store.BlockStore,
   447  	fastSync bool,
   448  	logger log.Logger,
   449  ) (bcReactor p2p.Reactor, err error) {
   450  	switch config.FastSync.Version {
   451  	case "v0":
   452  		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   453  	case "v1":
   454  		bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   455  	case "v2":
   456  		bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   457  	default:
   458  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
   459  	}
   460  
   461  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   462  	return bcReactor, nil
   463  }
   464  
   465  func createConsensusReactor(config *cfg.Config,
   466  	state sm.State,
   467  	blockExec *sm.BlockExecutor,
   468  	blockStore sm.BlockStore,
   469  	mempool mempl.Mempool,
   470  	evidencePool *evidence.Pool,
   471  	privValidator types.PrivValidator,
   472  	csMetrics *cs.Metrics,
   473  	waitSync bool,
   474  	eventBus *types.EventBus,
   475  	consensusLogger log.Logger,
   476  ) (*cs.Reactor, *cs.State) {
   477  	consensusState := cs.NewState(
   478  		config.Consensus,
   479  		state.Copy(),
   480  		blockExec,
   481  		blockStore,
   482  		mempool,
   483  		evidencePool,
   484  		cs.StateMetrics(csMetrics),
   485  	)
   486  	consensusState.SetLogger(consensusLogger)
   487  	if privValidator != nil {
   488  		consensusState.SetPrivValidator(privValidator)
   489  	}
   490  	consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
   491  	consensusReactor.SetLogger(consensusLogger)
   492  	// services which will be publishing and/or subscribing for messages (events)
   493  	// consensusReactor will set it on consensusState and blockExecutor
   494  	consensusReactor.SetEventBus(eventBus)
   495  	return consensusReactor, consensusState
   496  }
   497  
   498  func createTransport(
   499  	config *cfg.Config,
   500  	nodeInfo p2p.NodeInfo,
   501  	nodeKey *p2p.NodeKey,
   502  	proxyApp proxy.AppConns,
   503  ) (
   504  	*p2p.MultiplexTransport,
   505  	[]p2p.PeerFilterFunc,
   506  ) {
   507  	var (
   508  		mConnConfig = p2p.MConnConfig(config.P2P)
   509  		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
   510  		connFilters = []p2p.ConnFilterFunc{}
   511  		peerFilters = []p2p.PeerFilterFunc{}
   512  	)
   513  
   514  	if !config.P2P.AllowDuplicateIP {
   515  		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
   516  	}
   517  
   518  	// Filter peers by addr or pubkey with an ABCI query.
   519  	// If the query return code is OK, add peer.
   520  	if config.FilterPeers {
   521  		connFilters = append(
   522  			connFilters,
   523  			// ABCI query for address filtering.
   524  			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
   525  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   526  					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
   527  				})
   528  				if err != nil {
   529  					return err
   530  				}
   531  				if res.IsErr() {
   532  					return fmt.Errorf("error querying abci app: %v", res)
   533  				}
   534  
   535  				return nil
   536  			},
   537  		)
   538  
   539  		peerFilters = append(
   540  			peerFilters,
   541  			// ABCI query for ID filtering.
   542  			func(_ p2p.IPeerSet, p p2p.Peer) error {
   543  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   544  					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
   545  				})
   546  				if err != nil {
   547  					return err
   548  				}
   549  				if res.IsErr() {
   550  					return fmt.Errorf("error querying abci app: %v", res)
   551  				}
   552  
   553  				return nil
   554  			},
   555  		)
   556  	}
   557  
   558  	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
   559  
   560  	// Limit the number of incoming connections.
   561  	max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   562  	p2p.MultiplexTransportMaxIncomingConnections(max)(transport)
   563  
   564  	return transport, peerFilters
   565  }
   566  
   567  func createSwitch(config *cfg.Config,
   568  	transport p2p.Transport,
   569  	p2pMetrics *p2p.Metrics,
   570  	peerFilters []p2p.PeerFilterFunc,
   571  	mempoolReactor p2p.Reactor,
   572  	bcReactor p2p.Reactor,
   573  	stateSyncReactor *statesync.Reactor,
   574  	consensusReactor *cs.Reactor,
   575  	evidenceReactor *evidence.Reactor,
   576  	nodeInfo p2p.NodeInfo,
   577  	nodeKey *p2p.NodeKey,
   578  	p2pLogger log.Logger,
   579  ) *p2p.Switch {
   580  	sw := p2p.NewSwitch(
   581  		config.P2P,
   582  		transport,
   583  		p2p.WithMetrics(p2pMetrics),
   584  		p2p.SwitchPeerFilters(peerFilters...),
   585  	)
   586  	sw.SetLogger(p2pLogger)
   587  	sw.AddReactor("MEMPOOL", mempoolReactor)
   588  	sw.AddReactor("BLOCKCHAIN", bcReactor)
   589  	sw.AddReactor("CONSENSUS", consensusReactor)
   590  	sw.AddReactor("EVIDENCE", evidenceReactor)
   591  	sw.AddReactor("STATESYNC", stateSyncReactor)
   592  
   593  	sw.SetNodeInfo(nodeInfo)
   594  	sw.SetNodeKey(nodeKey)
   595  
   596  	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
   597  	return sw
   598  }
   599  
   600  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   601  	p2pLogger log.Logger, nodeKey *p2p.NodeKey,
   602  ) (pex.AddrBook, error) {
   603  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   604  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   605  
   606  	// Add ourselves to addrbook to prevent dialing ourselves
   607  	if config.P2P.ExternalAddress != "" {
   608  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
   609  		if err != nil {
   610  			return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
   611  		}
   612  		addrBook.AddOurAddress(addr)
   613  	}
   614  	if config.P2P.ListenAddress != "" {
   615  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
   616  		if err != nil {
   617  			return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
   618  		}
   619  		addrBook.AddOurAddress(addr)
   620  	}
   621  
   622  	sw.SetAddrBook(addrBook)
   623  
   624  	return addrBook, nil
   625  }
   626  
   627  func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
   628  	sw *p2p.Switch, logger log.Logger,
   629  ) *pex.Reactor {
   630  	// TODO persistent peers ? so we can have their DNS addrs saved
   631  	pexReactor := pex.NewReactor(addrBook,
   632  		&pex.ReactorConfig{
   633  			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
   634  			SeedMode: config.P2P.SeedMode,
   635  			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
   636  			// blocks assuming 10s blocks ~ 28 hours.
   637  			// TODO (melekes): make it dynamic based on the actual block latencies
   638  			// from the live network.
   639  			// https://github.com/vipernet-xyz/tm/issues/3523
   640  			SeedDisconnectWaitPeriod:     28 * time.Hour,
   641  			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
   642  		})
   643  	pexReactor.SetLogger(logger.With("module", "pex"))
   644  	sw.AddReactor("PEX", pexReactor)
   645  	return pexReactor
   646  }
   647  
// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
//
// If stateProvider is nil, a light-client-backed provider is constructed from
// the RPC servers and trust options in config. The sync itself runs in a
// background goroutine: this function returns immediately after kicking it
// off, and any failure inside the goroutine is only logged, not returned.
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
	stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
	stateStore sm.Store, blockStore *store.BlockStore, state sm.State,
) error {
	ssR.Logger.Info("Starting state sync")

	if stateProvider == nil {
		var err error
		// Bound light client setup so unreachable RPC servers can't hang startup.
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		stateProvider, err = statesync.NewLightClientStateProvider(
			ctx,
			state.ChainID, state.Version, state.InitialHeight,
			config.RPCServers, light.TrustOptions{
				Period: config.TrustPeriod,
				Height: config.TrustHeight,
				Hash:   config.TrustHashBytes(),
			}, ssR.Logger.With("module", "light"))
		if err != nil {
			return fmt.Errorf("failed to set up light client state provider: %w", err)
		}
	}

	go func() {
		// Discover snapshots and restore the application state.
		state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime)
		if err != nil {
			ssR.Logger.Error("State sync failed", "err", err)
			return
		}
		// Persist the synced state so the node can restart from it.
		err = stateStore.Bootstrap(state)
		if err != nil {
			ssR.Logger.Error("Failed to bootstrap node with new state", "err", err)
			return
		}
		err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
		if err != nil {
			ssR.Logger.Error("Failed to store last seen commit", "err", err)
			return
		}

		// Hand off either to fast sync or directly to consensus.
		if fastSync {
			// FIXME Very ugly to have these metrics bleed through here.
			conR.Metrics.StateSyncing.Set(0)
			conR.Metrics.FastSyncing.Set(1)
			err = bcR.SwitchToFastSync(state)
			if err != nil {
				ssR.Logger.Error("Failed to switch to fast sync", "err", err)
				return
			}
		} else {
			conR.SwitchToConsensus(state, true)
		}
	}()
	return nil
}
   704  
   705  // NewNode returns a new, ready to go, Tendermint Node.
   706  func NewNode(config *cfg.Config,
   707  	privValidator types.PrivValidator,
   708  	nodeKey *p2p.NodeKey,
   709  	clientCreator proxy.ClientCreator,
   710  	genesisDocProvider GenesisDocProvider,
   711  	dbProvider DBProvider,
   712  	metricsProvider MetricsProvider,
   713  	logger log.Logger,
   714  	options ...Option,
   715  ) (*Node, error) {
   716  	blockStore, stateDB, err := initDBs(config, dbProvider)
   717  	if err != nil {
   718  		return nil, err
   719  	}
   720  
   721  	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
   722  		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
   723  	})
   724  
   725  	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
   726  	if err != nil {
   727  		return nil, err
   728  	}
   729  
   730  	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
   731  	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
   732  	if err != nil {
   733  		return nil, err
   734  	}
   735  
   736  	// EventBus and IndexerService must be started before the handshake because
   737  	// we might need to index the txs of the replayed block as this might not have happened
   738  	// when the node stopped last time (i.e. the node stopped after it saved the block
   739  	// but before it indexed the txs, or, endblocker panicked)
   740  	eventBus, err := createAndStartEventBus(logger)
   741  	if err != nil {
   742  		return nil, err
   743  	}
   744  
   745  	indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(config,
   746  		genDoc.ChainID, dbProvider, eventBus, logger)
   747  	if err != nil {
   748  		return nil, err
   749  	}
   750  
   751  	// If an address is provided, listen on the socket for a connection from an
   752  	// external signing process.
   753  	if config.PrivValidatorListenAddr != "" {
   754  		// FIXME: we should start services inside OnStart
   755  		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
   756  		if err != nil {
   757  			return nil, fmt.Errorf("error with private validator socket client: %w", err)
   758  		}
   759  	}
   760  
   761  	pubKey, err := privValidator.GetPubKey()
   762  	if err != nil {
   763  		return nil, fmt.Errorf("can't get pubkey: %w", err)
   764  	}
   765  
   766  	// Determine whether we should attempt state sync.
   767  	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
   768  	if stateSync && state.LastBlockHeight > 0 {
   769  		logger.Info("Found local state with non-zero height, skipping state sync")
   770  		stateSync = false
   771  	}
   772  
   773  	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
   774  	// and replays any blocks as necessary to sync tendermint with the app.
   775  	consensusLogger := logger.With("module", "consensus")
   776  	if !stateSync {
   777  		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
   778  			return nil, err
   779  		}
   780  
   781  		// Reload the state. It will have the Version.Consensus.App set by the
   782  		// Handshake, and may have other modifications as well (ie. depending on
   783  		// what happened during block replay).
   784  		state, err = stateStore.Load()
   785  		if err != nil {
   786  			return nil, fmt.Errorf("cannot load state: %w", err)
   787  		}
   788  	}
   789  
   790  	// Determine whether we should do fast sync. This must happen after the handshake, since the
   791  	// app may modify the validator set, specifying ourself as the only validator.
   792  	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
   793  
   794  	logNodeStartupInfo(state, pubKey, logger, consensusLogger)
   795  
   796  	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
   797  
   798  	// Make MempoolReactor
   799  	mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
   800  
   801  	// Make Evidence Reactor
   802  	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
   803  	if err != nil {
   804  		return nil, err
   805  	}
   806  
   807  	// make block executor for consensus and blockchain reactors to execute blocks
   808  	blockExec := sm.NewBlockExecutor(
   809  		stateStore,
   810  		logger.With("module", "state"),
   811  		proxyApp.Consensus(),
   812  		mempool,
   813  		evidencePool,
   814  		sm.BlockExecutorWithMetrics(smMetrics),
   815  	)
   816  
   817  	// Make BlockchainReactor. Don't start fast sync if we're doing a state sync first.
   818  	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger)
   819  	if err != nil {
   820  		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
   821  	}
   822  
   823  	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first.
   824  	// FIXME We need to update metrics here, since other reactors don't have access to them.
   825  	if stateSync {
   826  		csMetrics.StateSyncing.Set(1)
   827  	} else if fastSync {
   828  		csMetrics.FastSyncing.Set(1)
   829  	}
   830  	consensusReactor, consensusState := createConsensusReactor(
   831  		config, state, blockExec, blockStore, mempool, evidencePool,
   832  		privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger,
   833  	)
   834  
   835  	// Set up state sync reactor, and schedule a sync if requested.
   836  	// FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy,
   837  	// we should clean this whole thing up. See:
   838  	// https://github.com/vipernet-xyz/tm/issues/4644
   839  	stateSyncReactor := statesync.NewReactor(
   840  		*config.StateSync,
   841  		proxyApp.Snapshot(),
   842  		proxyApp.Query(),
   843  		config.StateSync.TempDir,
   844  	)
   845  	stateSyncReactor.SetLogger(logger.With("module", "statesync"))
   846  
   847  	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
   848  	if err != nil {
   849  		return nil, err
   850  	}
   851  
   852  	// Setup Transport.
   853  	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
   854  
   855  	// Setup Switch.
   856  	p2pLogger := logger.With("module", "p2p")
   857  	sw := createSwitch(
   858  		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
   859  		stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
   860  	)
   861  
   862  	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
   863  	if err != nil {
   864  		return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
   865  	}
   866  
   867  	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   868  	if err != nil {
   869  		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
   870  	}
   871  
   872  	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
   873  	if err != nil {
   874  		return nil, fmt.Errorf("could not create addrbook: %w", err)
   875  	}
   876  
   877  	// Optionally, start the pex reactor
   878  	//
   879  	// TODO:
   880  	//
   881  	// We need to set Seeds and PersistentPeers on the switch,
   882  	// since it needs to be able to use these (and their DNS names)
   883  	// even if the PEX is off. We can include the DNS name in the NetAddress,
   884  	// but it would still be nice to have a clear list of the current "PersistentPeers"
   885  	// somewhere that we can return with net_info.
   886  	//
   887  	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
   888  	// Note we currently use the addrBook regardless at least for AddOurAddress
   889  	var pexReactor *pex.Reactor
   890  	if config.P2P.PexReactor {
   891  		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
   892  	}
   893  
   894  	if config.RPC.PprofListenAddress != "" {
   895  		go func() {
   896  			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
   897  			//nolint:gosec,nolintlint // G114: Use of net/http serve function that has no support for setting timeouts
   898  			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
   899  		}()
   900  	}
   901  
   902  	node := &Node{
   903  		config:        config,
   904  		genesisDoc:    genDoc,
   905  		privValidator: privValidator,
   906  
   907  		transport: transport,
   908  		sw:        sw,
   909  		addrBook:  addrBook,
   910  		nodeInfo:  nodeInfo,
   911  		nodeKey:   nodeKey,
   912  
   913  		stateStore:       stateStore,
   914  		blockStore:       blockStore,
   915  		bcReactor:        bcReactor,
   916  		mempoolReactor:   mempoolReactor,
   917  		mempool:          mempool,
   918  		consensusState:   consensusState,
   919  		consensusReactor: consensusReactor,
   920  		stateSyncReactor: stateSyncReactor,
   921  		stateSync:        stateSync,
   922  		stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
   923  		pexReactor:       pexReactor,
   924  		evidencePool:     evidencePool,
   925  		proxyApp:         proxyApp,
   926  		txIndexer:        txIndexer,
   927  		indexerService:   indexerService,
   928  		blockIndexer:     blockIndexer,
   929  		eventBus:         eventBus,
   930  	}
   931  	node.BaseService = *service.NewBaseService(logger, "Node", node)
   932  
   933  	for _, option := range options {
   934  		option(node)
   935  	}
   936  
   937  	return node, nil
   938  }
   939  
// OnStart starts the Node. It implements service.Service.
//
// Startup order matters here: the RPC server comes up before the P2P layer
// so the node can already accept transactions for the first block, and state
// sync (if enabled) is kicked off only after the switch has started and
// persistent peers have been dialed.
func (n *Node) OnStart() error {
	// If genesis time is in the future, block until then; the chain must not
	// start before its declared genesis time.
	now := tmtime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		time.Sleep(genTime.Sub(now))
	}

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		// Keep the listeners around so OnStop can close them.
		n.rpcListeners = listeners
	}

	// Optionally expose Prometheus metrics over HTTP.
	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport: bind the P2P listen address, with our node ID
	// embedded in the dialable address string.
	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
	}

	// Run state sync. When it completes, control is handed to the blockchain
	// (fast sync) reactor, so that reactor must support the switchover.
	if n.stateSync {
		bcR, ok := n.bcReactor.(fastSyncReactor)
		if !ok {
			return fmt.Errorf("this blockchain reactor does not support switching from state sync")
		}
		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
			n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
		if err != nil {
			return fmt.Errorf("failed to start state sync: %w", err)
		}
	}

	return nil
}
  1005  
// OnStop stops the Node. It implements service.Service.
//
// Teardown runs roughly in reverse order of OnStart: non-reactor services
// first, then the reactors (via the switch) and transport, and finally the
// external listeners, private validator, metrics server, and stores.
// Errors are logged rather than returned so shutdown always proceeds.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	if err := n.eventBus.Stop(); err != nil {
		n.Logger.Error("Error closing eventBus", "err", err)
	}
	if err := n.indexerService.Stop(); err != nil {
		n.Logger.Error("Error closing indexerService", "err", err)
	}

	// now stop the reactors
	if err := n.sw.Stop(); err != nil {
		n.Logger.Error("Error closing switch", "err", err)
	}

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	// The private validator may be a socket client that is itself a service.
	if pvsc, ok := n.privValidator.(service.Service); ok {
		if err := pvsc.Stop(); err != nil {
			n.Logger.Error("Error closing private validator", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
	if n.blockStore != nil {
		if err := n.blockStore.Close(); err != nil {
			n.Logger.Error("problem closing blockstore", "err", err)
		}
	}
	if n.stateStore != nil {
		if err := n.stateStore.Close(); err != nil {
			n.Logger.Error("problem closing statestore", "err", err)
		}
	}
}
  1062  
// ConfigureRPC makes sure RPC has all the objects it needs to operate.
// It populates the package-level rpccore environment with the node's
// stores, reactors, pubkey, and RPC config, then initializes the cached
// genesis chunks (see rpccore.InitGenesisChunks).
func (n *Node) ConfigureRPC() error {
	pubKey, err := n.privValidator.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	}
	rpccore.SetEnvironment(&rpccore.Environment{
		ProxyAppQuery:   n.proxyApp.Query(),
		ProxyAppMempool: n.proxyApp.Mempool(),

		StateStore:     n.stateStore,
		BlockStore:     n.blockStore,
		EvidencePool:   n.evidencePool,
		ConsensusState: n.consensusState,
		P2PPeers:       n.sw,
		P2PTransport:   n, // the Node itself implements the transport-info interface used by RPC

		PubKey:           pubKey,
		GenDoc:           n.genesisDoc,
		TxIndexer:        n.txIndexer,
		BlockIndexer:     n.blockIndexer,
		ConsensusReactor: n.consensusReactor,
		EventBus:         n.eventBus,
		Mempool:          n.mempool,

		Logger: n.Logger.With("module", "rpc"),

		Config: *n.config.RPC,
	})
	if err := rpccore.InitGenesisChunks(); err != nil {
		return err
	}

	return nil
}
  1098  
// startRPC configures the rpccore environment and starts one JSON-RPC/HTTP
// (plus websocket) server per configured listen address, and optionally a
// gRPC server. It returns all open listeners so OnStop can close them.
func (n *Node) startRPC() ([]net.Listener, error) {
	err := n.ConfigureRPC()
	if err != nil {
		return nil, err
	}

	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")

	// Unsafe routes are only registered when explicitly enabled in config.
	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	config := rpcserver.DefaultConfig()
	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
	// If necessary adjust global WriteTimeout to ensure it's greater than
	// TimeoutBroadcastTxCommit.
	// See https://github.com/vipernet-xyz/tm/issues/3435
	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wmLogger := rpcLogger.With("protocol", "websocket")
		// On websocket disconnect, drop all of that client's event
		// subscriptions; a missing subscription is not treated as an error.
		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
			rpcserver.OnDisconnect(func(remoteAddr string) {
				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
				}
			}),
			rpcserver.ReadLimit(config.MaxBodyBytes),
			rpcserver.WriteChanCapacity(n.config.RPC.WebSocketWriteBufferSize),
		)
		wm.SetLogger(wmLogger)
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
		listener, err := rpcserver.Listen(
			listenAddr,
			config,
		)
		if err != nil {
			return nil, err
		}

		// Optionally wrap the mux with CORS middleware.
		var rootHandler http.Handler = mux
		if n.config.RPC.IsCorsEnabled() {
			corsMiddleware := cors.New(cors.Options{
				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
				AllowedMethods: n.config.RPC.CORSAllowedMethods,
				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
			})
			rootHandler = corsMiddleware.Handler(mux)
		}
		// Serve in the background; serve errors are logged, not fatal.
		if n.config.RPC.IsTLSEnabled() {
			go func() {
				if err := rpcserver.ServeTLS(
					listener,
					rootHandler,
					n.config.RPC.CertFile(),
					n.config.RPC.KeyFile(),
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server with TLS", "err", err)
				}
			}()
		} else {
			go func() {
				if err := rpcserver.Serve(
					listener,
					rootHandler,
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server", "err", err)
				}
			}()
		}

		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		// The gRPC server gets its own config, sized independently.
		config := rpcserver.DefaultConfig()
		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
		// If necessary adjust global WriteTimeout to ensure it's greater than
		// TimeoutBroadcastTxCommit.
		// See https://github.com/vipernet-xyz/tm/issues/3435
		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
		}
		listener, err := rpcserver.Listen(grpcListenAddr, config)
		if err != nil {
			return nil, err
		}
		go func() {
			if err := grpccore.StartGRPCServer(listener); err != nil {
				n.Logger.Error("Error starting gRPC server", "err", err)
			}
		}()
		listeners = append(listeners, listener)

	}

	return listeners, nil
}
  1216  
  1217  // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
  1218  // collectors on addr.
  1219  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1220  	srv := &http.Server{
  1221  		Addr: addr,
  1222  		Handler: promhttp.InstrumentMetricHandler(
  1223  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1224  				prometheus.DefaultGatherer,
  1225  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1226  			),
  1227  		),
  1228  		ReadHeaderTimeout: readHeaderTimeout,
  1229  	}
  1230  	go func() {
  1231  		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
  1232  			// Error starting or closing listener:
  1233  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1234  		}
  1235  	}()
  1236  	return srv
  1237  }
  1238  
// Switch returns the Node's p2p Switch, which manages peer connections
// and routes messages between reactors.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}
  1243  
// BlockStore returns the Node's BlockStore, the persistent store of
// committed blocks.
func (n *Node) BlockStore() *store.BlockStore {
	return n.blockStore
}
  1248  
// ConsensusState returns the Node's ConsensusState, the consensus state
// machine.
func (n *Node) ConsensusState() *cs.State {
	return n.consensusState
}
  1253  
// ConsensusReactor returns the Node's ConsensusReactor, which handles
// consensus-related p2p traffic.
func (n *Node) ConsensusReactor() *cs.Reactor {
	return n.consensusReactor
}
  1258  
// MempoolReactor returns the Node's mempool reactor. The concrete type
// depends on the configured mempool version, hence the p2p.Reactor interface.
func (n *Node) MempoolReactor() p2p.Reactor {
	return n.mempoolReactor
}
  1263  
// Mempool returns the Node's mempool, holding uncommitted transactions.
func (n *Node) Mempool() mempl.Mempool {
	return n.mempool
}
  1268  
// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled
// in the P2P config.
func (n *Node) PEXReactor() *pex.Reactor {
	return n.pexReactor
}
  1273  
// EvidencePool returns the Node's EvidencePool, the store of misbehavior
// evidence.
func (n *Node) EvidencePool() *evidence.Pool {
	return n.evidencePool
}
  1278  
// EventBus returns the Node's EventBus, used to publish and subscribe to
// node events.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}
  1283  
// PrivValidator returns the Node's PrivValidator used for signing.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}
  1289  
// GenesisDoc returns the Node's GenesisDoc (the chain's genesis document).
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}
  1294  
// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}
  1299  
// Config returns the Node's config, as supplied at construction time.
func (n *Node) Config() *cfg.Config {
	return n.config
}
  1304  
  1305  //------------------------------------------------------------------------------
  1306  
  1307  func (n *Node) Listeners() []string {
  1308  	return []string{
  1309  		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
  1310  	}
  1311  }
  1312  
// IsListening reports whether the node's P2P transport is currently
// listening for inbound connections (set in OnStart, cleared in OnStop).
func (n *Node) IsListening() bool {
	return n.isListening
}
  1316  
// NodeInfo returns the Node's Info from the Switch, as advertised to peers.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}
  1321  
  1322  func makeNodeInfo(
  1323  	config *cfg.Config,
  1324  	nodeKey *p2p.NodeKey,
  1325  	txIndexer txindex.TxIndexer,
  1326  	genDoc *types.GenesisDoc,
  1327  	state sm.State,
  1328  ) (p2p.DefaultNodeInfo, error) {
  1329  	txIndexerStatus := "on"
  1330  	if _, ok := txIndexer.(*null.TxIndex); ok {
  1331  		txIndexerStatus = "off"
  1332  	}
  1333  
  1334  	var bcChannel byte
  1335  	switch config.FastSync.Version {
  1336  	case "v0":
  1337  		bcChannel = bcv0.BlockchainChannel
  1338  	case "v1":
  1339  		bcChannel = bcv1.BlockchainChannel
  1340  	case "v2":
  1341  		bcChannel = bcv2.BlockchainChannel
  1342  	default:
  1343  		return p2p.DefaultNodeInfo{}, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
  1344  	}
  1345  
  1346  	nodeInfo := p2p.DefaultNodeInfo{
  1347  		ProtocolVersion: p2p.NewProtocolVersion(
  1348  			version.P2PProtocol, // global
  1349  			state.Version.Consensus.Block,
  1350  			state.Version.Consensus.App,
  1351  		),
  1352  		DefaultNodeID: nodeKey.ID(),
  1353  		Network:       genDoc.ChainID,
  1354  		Version:       version.TMCoreSemVer,
  1355  		Channels: []byte{
  1356  			bcChannel,
  1357  			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
  1358  			mempl.MempoolChannel,
  1359  			evidence.EvidenceChannel,
  1360  			statesync.SnapshotChannel, statesync.ChunkChannel,
  1361  		},
  1362  		Moniker: config.Moniker,
  1363  		Other: p2p.DefaultNodeInfoOther{
  1364  			TxIndex:    txIndexerStatus,
  1365  			RPCAddress: config.RPC.ListenAddress,
  1366  		},
  1367  	}
  1368  
  1369  	if config.P2P.PexReactor {
  1370  		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
  1371  	}
  1372  
  1373  	lAddr := config.P2P.ExternalAddress
  1374  
  1375  	if lAddr == "" {
  1376  		lAddr = config.P2P.ListenAddress
  1377  	}
  1378  
  1379  	nodeInfo.ListenAddr = lAddr
  1380  
  1381  	err := nodeInfo.Validate()
  1382  	return nodeInfo, err
  1383  }
  1384  
  1385  //------------------------------------------------------------------------------
  1386  
// genesisDocKey is the database key under which the genesis document is
// persisted (see saveGenesisDoc and loadGenesisDoc).
var genesisDocKey = []byte("genesisDoc")
  1388  
  1389  // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
  1390  // database, or creates one using the given genesisDocProvider. On success this also
  1391  // returns the genesis doc loaded through the given provider.
  1392  func LoadStateFromDBOrGenesisDocProvider(
  1393  	stateDB dbm.DB,
  1394  	genesisDocProvider GenesisDocProvider,
  1395  ) (sm.State, *types.GenesisDoc, error) {
  1396  	// Get genesis doc
  1397  	genDoc, err := loadGenesisDoc(stateDB)
  1398  	if err != nil {
  1399  		genDoc, err = genesisDocProvider()
  1400  		if err != nil {
  1401  			return sm.State{}, nil, err
  1402  		}
  1403  		// save genesis doc to prevent a certain class of user errors (e.g. when it
  1404  		// was changed, accidentally or not). Also good for audit trail.
  1405  		if err := saveGenesisDoc(stateDB, genDoc); err != nil {
  1406  			return sm.State{}, nil, err
  1407  		}
  1408  	}
  1409  	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
  1410  		DiscardABCIResponses: false,
  1411  	})
  1412  	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
  1413  	if err != nil {
  1414  		return sm.State{}, nil, err
  1415  	}
  1416  	return state, genDoc, nil
  1417  }
  1418  
  1419  // panics if failed to unmarshal bytes
  1420  func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
  1421  	b, err := db.Get(genesisDocKey)
  1422  	if err != nil {
  1423  		panic(err)
  1424  	}
  1425  	if len(b) == 0 {
  1426  		return nil, errors.New("genesis doc not found")
  1427  	}
  1428  	var genDoc *types.GenesisDoc
  1429  	err = tmjson.Unmarshal(b, &genDoc)
  1430  	if err != nil {
  1431  		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
  1432  	}
  1433  	return genDoc, nil
  1434  }
  1435  
  1436  // panics if failed to marshal the given genesis document
  1437  func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error {
  1438  	b, err := tmjson.Marshal(genDoc)
  1439  	if err != nil {
  1440  		return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err)
  1441  	}
  1442  	if err := db.SetSync(genesisDocKey, b); err != nil {
  1443  		return err
  1444  	}
  1445  
  1446  	return nil
  1447  }
  1448  
  1449  func createAndStartPrivValidatorSocketClient(
  1450  	listenAddr,
  1451  	chainID string,
  1452  	logger log.Logger,
  1453  ) (types.PrivValidator, error) {
  1454  	pve, err := privval.NewSignerListener(listenAddr, logger)
  1455  	if err != nil {
  1456  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1457  	}
  1458  
  1459  	pvsc, err := privval.NewSignerClient(pve, chainID)
  1460  	if err != nil {
  1461  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1462  	}
  1463  
  1464  	// try to get a pubkey from private validate first time
  1465  	_, err = pvsc.GetPubKey()
  1466  	if err != nil {
  1467  		return nil, fmt.Errorf("can't get pubkey: %w", err)
  1468  	}
  1469  
  1470  	const (
  1471  		retries = 50 // 50 * 100ms = 5s total
  1472  		timeout = 100 * time.Millisecond
  1473  	)
  1474  	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
  1475  
  1476  	return pvscWithRetries, nil
  1477  }
  1478  
  1479  // splitAndTrimEmpty slices s into all subslices separated by sep and returns a
  1480  // slice of the string s with all leading and trailing Unicode code points
  1481  // contained in cutset removed. If sep is empty, SplitAndTrim splits after each
  1482  // UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
  1483  // -1.  also filter out empty strings, only return non-empty strings.
  1484  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1485  	if s == "" {
  1486  		return []string{}
  1487  	}
  1488  
  1489  	spl := strings.Split(s, sep)
  1490  	nonEmptyStrings := make([]string, 0, len(spl))
  1491  	for i := 0; i < len(spl); i++ {
  1492  		element := strings.Trim(spl[i], cutset)
  1493  		if element != "" {
  1494  			nonEmptyStrings = append(nonEmptyStrings, element)
  1495  		}
  1496  	}
  1497  	return nonEmptyStrings
  1498  }