github.com/vipernet-xyz/tm@v0.34.24/test/maverick/node/node.go (about)

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"net"
     9  	"net/http"
    10  	_ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port
    11  	"strconv"
    12  	"strings"
    13  	"time"
    14  
    15  	"github.com/prometheus/client_golang/prometheus"
    16  	"github.com/prometheus/client_golang/prometheus/promhttp"
    17  	"github.com/rs/cors"
    18  
    19  	dbm "github.com/tendermint/tm-db"
    20  
    21  	abci "github.com/vipernet-xyz/tm/abci/types"
    22  	bcv0 "github.com/vipernet-xyz/tm/blockchain/v0"
    23  	bcv1 "github.com/vipernet-xyz/tm/blockchain/v1"
    24  	bcv2 "github.com/vipernet-xyz/tm/blockchain/v2"
    25  	cfg "github.com/vipernet-xyz/tm/config"
    26  	"github.com/vipernet-xyz/tm/consensus"
    27  	"github.com/vipernet-xyz/tm/crypto"
    28  	"github.com/vipernet-xyz/tm/evidence"
    29  	tmjson "github.com/vipernet-xyz/tm/libs/json"
    30  	"github.com/vipernet-xyz/tm/libs/log"
    31  	tmpubsub "github.com/vipernet-xyz/tm/libs/pubsub"
    32  	"github.com/vipernet-xyz/tm/libs/service"
    33  	"github.com/vipernet-xyz/tm/light"
    34  	mempl "github.com/vipernet-xyz/tm/mempool"
    35  	mempoolv0 "github.com/vipernet-xyz/tm/mempool/v0"
    36  	mempoolv1 "github.com/vipernet-xyz/tm/mempool/v1"
    37  	"github.com/vipernet-xyz/tm/p2p"
    38  	"github.com/vipernet-xyz/tm/p2p/pex"
    39  	"github.com/vipernet-xyz/tm/privval"
    40  	"github.com/vipernet-xyz/tm/proxy"
    41  	rpccore "github.com/vipernet-xyz/tm/rpc/core"
    42  	grpccore "github.com/vipernet-xyz/tm/rpc/grpc"
    43  	rpcserver "github.com/vipernet-xyz/tm/rpc/jsonrpc/server"
    44  	sm "github.com/vipernet-xyz/tm/state"
    45  	"github.com/vipernet-xyz/tm/state/indexer"
    46  	blockidxkv "github.com/vipernet-xyz/tm/state/indexer/block/kv"
    47  	blockidxnull "github.com/vipernet-xyz/tm/state/indexer/block/null"
    48  	"github.com/vipernet-xyz/tm/state/txindex"
    49  	"github.com/vipernet-xyz/tm/state/txindex/kv"
    50  	"github.com/vipernet-xyz/tm/state/txindex/null"
    51  	"github.com/vipernet-xyz/tm/statesync"
    52  	"github.com/vipernet-xyz/tm/store"
    53  	cs "github.com/vipernet-xyz/tm/test/maverick/consensus"
    54  	"github.com/vipernet-xyz/tm/types"
    55  	tmtime "github.com/vipernet-xyz/tm/types/time"
    56  	"github.com/vipernet-xyz/tm/version"
    57  )
    58  
    59  //------------------------------------------------------------------------------
    60  
    61  // ParseMisbehaviors is a util function that converts a comma separated string into
    62  // a map of misbehaviors to be executed by the maverick node
    63  func ParseMisbehaviors(str string) (map[int64]cs.Misbehavior, error) {
    64  	// check if string is empty in which case we run a normal node
    65  	misbehaviors := make(map[int64]cs.Misbehavior)
    66  	if str == "" {
    67  		return misbehaviors, nil
    68  	}
    69  	strs := strings.Split(str, ",")
    70  	if len(strs)%2 != 0 {
    71  		return misbehaviors, errors.New("missing either height or misbehavior name in the misbehavior flag")
    72  	}
    73  OUTER_LOOP:
    74  	for i := 0; i < len(strs); i += 2 {
    75  		height, err := strconv.ParseInt(strs[i+1], 10, 64)
    76  		if err != nil {
    77  			return misbehaviors, fmt.Errorf("failed to parse misbehavior height: %w", err)
    78  		}
    79  		for key, misbehavior := range cs.MisbehaviorList {
    80  			if key == strs[i] {
    81  				misbehaviors[height] = misbehavior
    82  				continue OUTER_LOOP
    83  			}
    84  		}
    85  		return misbehaviors, fmt.Errorf("received unknown misbehavior: %s. Did you forget to add it?", strs[i])
    86  	}
    87  
    88  	return misbehaviors, nil
    89  }
    90  
// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string      // database name, e.g. "blockstore", "state", "tx_index", "evidence"
	Config *cfg.Config // node config; supplies the backend type and data directory
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

// readHeaderTimeout bounds how long an HTTP server waits for request headers.
// NOTE(review): not referenced in this chunk — presumably applied to the
// RPC/Prometheus servers elsewhere in the file; confirm before removing.
const readHeaderTimeout = 10 * time.Second
   101  
// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the ctx.Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
	// Backend type (goleveldb, rocksdb, ...) comes straight from the config string.
	dbType := dbm.BackendType(ctx.Config.DBBackend)
	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}
   108  
// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)

// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
	return func() (*types.GenesisDoc, error) {
		// The genesis file is re-read on every invocation; no caching here.
		return types.GenesisDocFromFile(config.GenesisFile())
	}
}

// Provider takes a config and a logger and returns a ready to go Node.
type Provider func(*cfg.Config, log.Logger) (*Node, error)
   124  
// DefaultNewNode returns a Tendermint node with default settings for the
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
// It implements NodeProvider.
//
// misbehaviors maps block heights to the misbehavior the maverick node should
// execute at that height (see ParseMisbehaviors).
func DefaultNewNode(config *cfg.Config, logger log.Logger, misbehaviors map[int64]cs.Misbehavior) (*Node, error) {
	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
	if err != nil {
		return nil, fmt.Errorf("failed to load or gen node key %s, err: %w", config.NodeKeyFile(), err)
	}

	return NewNode(config,
		// Validator key and state are loaded from (or generated at) the configured paths.
		LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
		DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		logger,
		misbehaviors,
	)
}
   145  
   146  // MetricsProvider returns a consensus, p2p and mempool Metrics.
   147  type MetricsProvider func(chainID string) (*consensus.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
   148  
   149  // DefaultMetricsProvider returns Metrics build using Prometheus client library
   150  // if Prometheus is enabled. Otherwise, it returns no-op Metrics.
   151  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   152  	return func(chainID string) (*consensus.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
   153  		if config.Prometheus {
   154  			return consensus.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   155  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   156  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   157  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   158  		}
   159  		return consensus.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
   160  	}
   161  }
   162  
// Option sets a parameter for the node.
type Option func(*Node)

// Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors.
// See: https://github.com/vipernet-xyz/tm/issues/4595
//
// Used by startStateSync to hand control over to fast sync once state sync
// has completed.
type fastSyncReactor interface {
	// SwitchToFastSync transitions the reactor into fast-sync mode, starting
	// from the given (freshly bootstrapped) state.
	SwitchToFastSync(sm.State) error
}
   171  
   172  // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
   173  // the node's Switch.
   174  //
   175  // WARNING: using any name from the below list of the existing reactors will
   176  // result in replacing it with the custom one.
   177  //
   178  //   - MEMPOOL
   179  //   - BLOCKCHAIN
   180  //   - CONSENSUS
   181  //   - EVIDENCE
   182  //   - PEX
   183  //   - STATESYNC
   184  func CustomReactors(reactors map[string]p2p.Reactor) Option {
   185  	return func(n *Node) {
   186  		for name, reactor := range reactors {
   187  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   188  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   189  					"name", name, "existing", existingReactor, "custom", reactor)
   190  				n.sw.RemoveReactor(name, existingReactor)
   191  			}
   192  			n.sw.AddReactor(name, reactor)
   193  		}
   194  	}
   195  }
   196  
   197  func CustomReactorsAsConstructors(reactors map[string]func(n *Node) p2p.Reactor) Option {
   198  	return func(n *Node) {
   199  		for name, customReactor := range reactors {
   200  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   201  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   202  					"name", name)
   203  				n.sw.RemoveReactor(name, existingReactor)
   204  			}
   205  			n.sw.AddReactor(name, customReactor(n))
   206  		}
   207  	}
   208  }
   209  
// StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and
// build a State object for bootstrapping the node.
// WARNING: this interface is considered unstable and subject to change.
//
// If this option is not used, a light-client-backed provider is constructed
// lazily when state sync starts (see startStateSync).
func StateProvider(stateProvider statesync.StateProvider) Option {
	return func(n *Node) {
		n.stateSyncProvider = stateProvider
	}
}
   218  
   219  //------------------------------------------------------------------------------
   220  
// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	service.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo
	nodeKey     *p2p.NodeKey // our node privkey
	isListening bool

	// services
	eventBus          *types.EventBus // pub/sub for services
	stateStore        sm.Store
	blockStore        *store.BlockStore // store the blockchain to disk
	bcReactor         p2p.Reactor       // for fast-syncing
	mempoolReactor    p2p.Reactor       // for gossipping transactions
	mempool           mempl.Mempool
	stateSync         bool                    // whether the node should state sync on startup
	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
	stateSyncGenesis  sm.State                // provides the genesis state for state sync
	consensusState    *cs.State               // latest consensus state
	consensusReactor  *cs.Reactor             // for participating in the consensus
	pexReactor        *pex.Reactor            // for exchanging peer addresses
	evidencePool      *evidence.Pool          // tracking evidence
	proxyApp          proxy.AppConns          // connection to the application
	rpcListeners      []net.Listener          // rpc servers
	txIndexer         txindex.TxIndexer       // indexes transactions
	blockIndexer      indexer.BlockIndexer    // indexes block events
	indexerService    *txindex.IndexerService // feeds event-bus events to the indexers
	prometheusSrv     *http.Server            // Prometheus metrics HTTP server, if started
}
   261  
   262  func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
   263  	var blockStoreDB dbm.DB
   264  	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
   265  	if err != nil {
   266  		return
   267  	}
   268  	blockStore = store.NewBlockStore(blockStoreDB)
   269  
   270  	stateDB, err = dbProvider(&DBContext{"state", config})
   271  	if err != nil {
   272  		return
   273  	}
   274  
   275  	return
   276  }
   277  
   278  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
   279  	proxyApp := proxy.NewAppConns(clientCreator)
   280  	proxyApp.SetLogger(logger.With("module", "proxy"))
   281  	if err := proxyApp.Start(); err != nil {
   282  		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
   283  	}
   284  	return proxyApp, nil
   285  }
   286  
   287  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   288  	eventBus := types.NewEventBus()
   289  	eventBus.SetLogger(logger.With("module", "events"))
   290  	if err := eventBus.Start(); err != nil {
   291  		return nil, err
   292  	}
   293  	return eventBus, nil
   294  }
   295  
   296  func createAndStartIndexerService(
   297  	config *cfg.Config,
   298  	dbProvider DBProvider,
   299  	eventBus *types.EventBus,
   300  	logger log.Logger,
   301  ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
   302  	var (
   303  		txIndexer    txindex.TxIndexer
   304  		blockIndexer indexer.BlockIndexer
   305  	)
   306  
   307  	switch config.TxIndex.Indexer {
   308  	case "kv":
   309  		store, err := dbProvider(&DBContext{"tx_index", config})
   310  		if err != nil {
   311  			return nil, nil, nil, err
   312  		}
   313  
   314  		txIndexer = kv.NewTxIndex(store)
   315  		blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
   316  	default:
   317  		txIndexer = &null.TxIndex{}
   318  		blockIndexer = &blockidxnull.BlockerIndexer{}
   319  	}
   320  
   321  	indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false)
   322  	indexerService.SetLogger(logger.With("module", "txindex"))
   323  
   324  	if err := indexerService.Start(); err != nil {
   325  		return nil, nil, nil, err
   326  	}
   327  
   328  	return indexerService, txIndexer, blockIndexer, nil
   329  }
   330  
   331  func doHandshake(
   332  	stateStore sm.Store,
   333  	state sm.State,
   334  	blockStore sm.BlockStore,
   335  	genDoc *types.GenesisDoc,
   336  	eventBus types.BlockEventPublisher,
   337  	proxyApp proxy.AppConns,
   338  	consensusLogger log.Logger,
   339  ) error {
   340  	handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
   341  	handshaker.SetLogger(consensusLogger)
   342  	handshaker.SetEventBus(eventBus)
   343  	if err := handshaker.Handshake(proxyApp); err != nil {
   344  		return fmt.Errorf("error during handshake: %v", err)
   345  	}
   346  	return nil
   347  }
   348  
   349  func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
   350  	// Log the version info.
   351  	logger.Info("Version info",
   352  		"software", version.TMCoreSemVer,
   353  		"block", version.BlockProtocol,
   354  		"p2p", version.P2PProtocol,
   355  	)
   356  
   357  	// If the state and software differ in block version, at least log it.
   358  	if state.Version.Consensus.Block != version.BlockProtocol {
   359  		logger.Info("Software and state have different block protocols",
   360  			"software", version.BlockProtocol,
   361  			"state", state.Version.Consensus.Block,
   362  		)
   363  	}
   364  
   365  	addr := pubKey.Address()
   366  	// Log whether this node is a validator or an observer
   367  	if state.Validators.HasAddress(addr) {
   368  		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   369  	} else {
   370  		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   371  	}
   372  }
   373  
   374  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   375  	if state.Validators.Size() > 1 {
   376  		return false
   377  	}
   378  	addr, _ := state.Validators.GetByIndex(0)
   379  	return bytes.Equal(pubKey.Address(), addr)
   380  }
   381  
   382  func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
   383  	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger,
   384  ) (p2p.Reactor, mempl.Mempool) {
   385  	switch config.Mempool.Version {
   386  	case cfg.MempoolV1:
   387  		mp := mempoolv1.NewTxMempool(
   388  			logger,
   389  			config.Mempool,
   390  			proxyApp.Mempool(),
   391  			state.LastBlockHeight,
   392  			mempoolv1.WithMetrics(memplMetrics),
   393  			mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
   394  			mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
   395  		)
   396  
   397  		reactor := mempoolv1.NewReactor(
   398  			config.Mempool,
   399  			mp,
   400  		)
   401  		if config.Consensus.WaitForTxs() {
   402  			mp.EnableTxsAvailable()
   403  		}
   404  
   405  		return reactor, mp
   406  
   407  	case cfg.MempoolV0:
   408  		mp := mempoolv0.NewCListMempool(
   409  			config.Mempool,
   410  			proxyApp.Mempool(),
   411  			state.LastBlockHeight,
   412  			mempoolv0.WithMetrics(memplMetrics),
   413  			mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
   414  			mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
   415  		)
   416  
   417  		mp.SetLogger(logger)
   418  		mp.SetLogger(logger)
   419  
   420  		reactor := mempoolv0.NewReactor(
   421  			config.Mempool,
   422  			mp,
   423  		)
   424  		if config.Consensus.WaitForTxs() {
   425  			mp.EnableTxsAvailable()
   426  		}
   427  
   428  		return reactor, mp
   429  
   430  	default:
   431  		return nil, nil
   432  	}
   433  }
   434  
   435  func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   436  	stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger,
   437  ) (*evidence.Reactor, *evidence.Pool, error) {
   438  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   439  	if err != nil {
   440  		return nil, nil, err
   441  	}
   442  	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
   443  		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
   444  	})
   445  	evidenceLogger := logger.With("module", "evidence")
   446  	evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
   447  	if err != nil {
   448  		return nil, nil, err
   449  	}
   450  	evidenceReactor := evidence.NewReactor(evidencePool)
   451  	evidenceReactor.SetLogger(evidenceLogger)
   452  	return evidenceReactor, evidencePool, nil
   453  }
   454  
   455  func createBlockchainReactor(config *cfg.Config,
   456  	state sm.State,
   457  	blockExec *sm.BlockExecutor,
   458  	blockStore *store.BlockStore,
   459  	fastSync bool,
   460  	logger log.Logger,
   461  ) (bcReactor p2p.Reactor, err error) {
   462  	switch config.FastSync.Version {
   463  	case "v0":
   464  		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   465  	case "v1":
   466  		bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   467  	case "v2":
   468  		bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   469  	default:
   470  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
   471  	}
   472  
   473  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   474  	return bcReactor, nil
   475  }
   476  
// createConsensusReactor builds the maverick consensus state machine and its
// reactor. misbehaviors maps heights to the misbehavior to execute at that
// height; waitSync keeps the reactor idle until state/fast sync completes.
func createConsensusReactor(config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore sm.BlockStore,
	mempool mempl.Mempool,
	evidencePool *evidence.Pool,
	privValidator types.PrivValidator,
	csMetrics *consensus.Metrics,
	waitSync bool,
	eventBus *types.EventBus,
	consensusLogger log.Logger,
	misbehaviors map[int64]cs.Misbehavior,
) (*cs.Reactor, *cs.State) {
	consensusState := cs.NewState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mempool,
		evidencePool,
		misbehaviors,
		cs.StateMetrics(csMetrics),
	)
	consensusState.SetLogger(consensusLogger)
	// A nil privValidator means this node observes consensus without voting.
	if privValidator != nil {
		consensusState.SetPrivValidator(privValidator)
	}
	consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
	consensusReactor.SetLogger(consensusLogger)
	// services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor
	consensusReactor.SetEventBus(eventBus)
	return consensusReactor, consensusState
}
   511  
   512  func createTransport(
   513  	config *cfg.Config,
   514  	nodeInfo p2p.NodeInfo,
   515  	nodeKey *p2p.NodeKey,
   516  	proxyApp proxy.AppConns,
   517  ) (
   518  	*p2p.MultiplexTransport,
   519  	[]p2p.PeerFilterFunc,
   520  ) {
   521  	var (
   522  		mConnConfig = p2p.MConnConfig(config.P2P)
   523  		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
   524  		connFilters = []p2p.ConnFilterFunc{}
   525  		peerFilters = []p2p.PeerFilterFunc{}
   526  	)
   527  
   528  	if !config.P2P.AllowDuplicateIP {
   529  		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
   530  	}
   531  
   532  	// Filter peers by addr or pubkey with an ABCI query.
   533  	// If the query return code is OK, add peer.
   534  	if config.FilterPeers {
   535  		connFilters = append(
   536  			connFilters,
   537  			// ABCI query for address filtering.
   538  			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
   539  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   540  					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
   541  				})
   542  				if err != nil {
   543  					return err
   544  				}
   545  				if res.IsErr() {
   546  					return fmt.Errorf("error querying abci app: %v", res)
   547  				}
   548  
   549  				return nil
   550  			},
   551  		)
   552  
   553  		peerFilters = append(
   554  			peerFilters,
   555  			// ABCI query for ID filtering.
   556  			func(_ p2p.IPeerSet, p p2p.Peer) error {
   557  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   558  					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
   559  				})
   560  				if err != nil {
   561  					return err
   562  				}
   563  				if res.IsErr() {
   564  					return fmt.Errorf("error querying abci app: %v", res)
   565  				}
   566  
   567  				return nil
   568  			},
   569  		)
   570  	}
   571  
   572  	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
   573  
   574  	// Limit the number of incoming connections.
   575  	max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   576  	p2p.MultiplexTransportMaxIncomingConnections(max)(transport)
   577  
   578  	return transport, peerFilters
   579  }
   580  
// createSwitch builds the p2p switch and registers all core reactors on it
// under their canonical names (MEMPOOL, BLOCKCHAIN, CONSENSUS, EVIDENCE,
// STATESYNC — the same names CustomReactors can override).
func createSwitch(config *cfg.Config,
	transport p2p.Transport,
	p2pMetrics *p2p.Metrics,
	peerFilters []p2p.PeerFilterFunc,
	mempoolReactor p2p.Reactor,
	bcReactor p2p.Reactor,
	stateSyncReactor *statesync.Reactor,
	consensusReactor *cs.Reactor,
	evidenceReactor *evidence.Reactor,
	nodeInfo p2p.NodeInfo,
	nodeKey *p2p.NodeKey,
	p2pLogger log.Logger,
) *p2p.Switch {
	sw := p2p.NewSwitch(
		config.P2P,
		transport,
		p2p.WithMetrics(p2pMetrics),
		p2p.SwitchPeerFilters(peerFilters...),
	)
	sw.SetLogger(p2pLogger)
	sw.AddReactor("MEMPOOL", mempoolReactor)
	sw.AddReactor("BLOCKCHAIN", bcReactor)
	sw.AddReactor("CONSENSUS", consensusReactor)
	sw.AddReactor("EVIDENCE", evidenceReactor)
	sw.AddReactor("STATESYNC", stateSyncReactor)

	sw.SetNodeInfo(nodeInfo)
	sw.SetNodeKey(nodeKey)

	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
	return sw
}
   613  
   614  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   615  	p2pLogger log.Logger, nodeKey *p2p.NodeKey,
   616  ) (pex.AddrBook, error) {
   617  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   618  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   619  
   620  	// Add ourselves to addrbook to prevent dialing ourselves
   621  	if config.P2P.ExternalAddress != "" {
   622  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
   623  		if err != nil {
   624  			return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
   625  		}
   626  		addrBook.AddOurAddress(addr)
   627  	}
   628  	if config.P2P.ListenAddress != "" {
   629  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
   630  		if err != nil {
   631  			return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
   632  		}
   633  		addrBook.AddOurAddress(addr)
   634  	}
   635  
   636  	sw.SetAddrBook(addrBook)
   637  
   638  	return addrBook, nil
   639  }
   640  
// createPEXReactorAndAddToSwitch builds the peer-exchange (PEX) reactor from
// the P2P config and registers it on the switch under "PEX".
func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
	sw *p2p.Switch, logger log.Logger,
) *pex.Reactor {
	// TODO persistent peers ? so we can have their DNS addrs saved
	pexReactor := pex.NewReactor(addrBook,
		&pex.ReactorConfig{
			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
			SeedMode: config.P2P.SeedMode,
			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
			// blocks assuming 10s blocks ~ 28 hours.
			// TODO (melekes): make it dynamic based on the actual block latencies
			// from the live network.
			// https://github.com/vipernet-xyz/tm/issues/3523
			SeedDisconnectWaitPeriod:     28 * time.Hour,
			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
		})
	pexReactor.SetLogger(logger.With("module", "pex"))
	sw.AddReactor("PEX", pexReactor)
	return pexReactor
}
   661  
// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
//
// If no stateProvider was injected via the StateProvider option, a
// light-client-backed provider is built from the configured RPC servers and
// trust options. The sync itself runs in a goroutine; failures are logged
// rather than returned, since this function only kicks the process off.
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
	stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
	stateStore sm.Store, blockStore *store.BlockStore, state sm.State,
) error {
	ssR.Logger.Info("Starting state sync")

	if stateProvider == nil {
		var err error
		// Bound the light-client setup; the timeout covers provider creation only.
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		stateProvider, err = statesync.NewLightClientStateProvider(
			ctx,
			state.ChainID, state.Version, state.InitialHeight,
			config.RPCServers, light.TrustOptions{
				Period: config.TrustPeriod,
				Height: config.TrustHeight,
				Hash:   config.TrustHashBytes(),
			}, ssR.Logger.With("module", "light"))
		if err != nil {
			return fmt.Errorf("failed to set up light client state provider: %w", err)
		}
	}

	go func() {
		// Fetch and restore a snapshot, yielding the post-sync state and its commit.
		state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime)
		if err != nil {
			ssR.Logger.Error("State sync failed", "err", err)
			return
		}
		// Persist the synced state before the commit so the node can restart
		// from it; each step aborts the handoff on failure.
		err = stateStore.Bootstrap(state)
		if err != nil {
			ssR.Logger.Error("Failed to bootstrap node with new state", "err", err)
			return
		}
		err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
		if err != nil {
			ssR.Logger.Error("Failed to store last seen commit", "err", err)
			return
		}

		if fastSync {
			// FIXME Very ugly to have these metrics bleed through here.
			conR.Metrics.StateSyncing.Set(0)
			conR.Metrics.FastSyncing.Set(1)
			err = bcR.SwitchToFastSync(state)
			if err != nil {
				ssR.Logger.Error("Failed to switch to fast sync", "err", err)
				return
			}
		} else {
			// No fast sync requested: hand control straight to consensus.
			conR.SwitchToConsensus(state, true)
		}
	}()
	return nil
}
   718  
   719  // NewNode returns a new, ready to go, Tendermint Node.
   720  func NewNode(config *cfg.Config,
   721  	privValidator types.PrivValidator,
   722  	nodeKey *p2p.NodeKey,
   723  	clientCreator proxy.ClientCreator,
   724  	genesisDocProvider GenesisDocProvider,
   725  	dbProvider DBProvider,
   726  	metricsProvider MetricsProvider,
   727  	logger log.Logger,
   728  	misbehaviors map[int64]cs.Misbehavior,
   729  	options ...Option,
   730  ) (*Node, error) {
   731  	blockStore, stateDB, err := initDBs(config, dbProvider)
   732  	if err != nil {
   733  		return nil, err
   734  	}
   735  
   736  	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
   737  		DiscardABCIResponses: false,
   738  	})
   739  
   740  	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
   741  	if err != nil {
   742  		return nil, err
   743  	}
   744  
   745  	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
   746  	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
   747  	if err != nil {
   748  		return nil, err
   749  	}
   750  
   751  	// EventBus and IndexerService must be started before the handshake because
   752  	// we might need to index the txs of the replayed block as this might not have happened
   753  	// when the node stopped last time (i.e. the node stopped after it saved the block
   754  	// but before it indexed the txs, or, endblocker panicked)
   755  	eventBus, err := createAndStartEventBus(logger)
   756  	if err != nil {
   757  		return nil, err
   758  	}
   759  
   760  	indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger)
   761  	if err != nil {
   762  		return nil, err
   763  	}
   764  
   765  	// If an address is provided, listen on the socket for a connection from an
   766  	// external signing process.
   767  	if config.PrivValidatorListenAddr != "" {
   768  		// FIXME: we should start services inside OnStart
   769  		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
   770  		if err != nil {
   771  			return nil, fmt.Errorf("error with private validator socket client: %w", err)
   772  		}
   773  	}
   774  
   775  	pubKey, err := privValidator.GetPubKey()
   776  	if err != nil {
   777  		return nil, fmt.Errorf("can't get pubkey: %w", err)
   778  	}
   779  
   780  	// Determine whether we should do state and/or fast sync.
   781  	// We don't fast-sync when the only validator is us.
   782  	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
   783  	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
   784  	if stateSync && state.LastBlockHeight > 0 {
   785  		logger.Info("Found local state with non-zero height, skipping state sync")
   786  		stateSync = false
   787  	}
   788  
   789  	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
   790  	// and replays any blocks as necessary to sync tendermint with the app.
   791  	consensusLogger := logger.With("module", "consensus")
   792  	if !stateSync {
   793  		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
   794  			return nil, err
   795  		}
   796  
   797  		// Reload the state. It will have the Version.Consensus.App set by the
   798  		// Handshake, and may have other modifications as well (ie. depending on
   799  		// what happened during block replay).
   800  		state, err = stateStore.Load()
   801  		if err != nil {
   802  			return nil, fmt.Errorf("cannot load state: %w", err)
   803  		}
   804  	}
   805  
   806  	logNodeStartupInfo(state, pubKey, logger, consensusLogger)
   807  
   808  	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
   809  
   810  	// Make MempoolReactor
   811  	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
   812  
   813  	// Make Evidence Reactor
   814  	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
   815  	if err != nil {
   816  		return nil, err
   817  	}
   818  
   819  	// make block executor for consensus and blockchain reactors to execute blocks
   820  	blockExec := sm.NewBlockExecutor(
   821  		stateStore,
   822  		logger.With("module", "state"),
   823  		proxyApp.Consensus(),
   824  		mempool,
   825  		evidencePool,
   826  		sm.BlockExecutorWithMetrics(smMetrics),
   827  	)
   828  
   829  	// Make BlockchainReactor. Don't start fast sync if we're doing a state sync first.
   830  	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger)
   831  	if err != nil {
   832  		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
   833  	}
   834  
   835  	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first.
   836  	// FIXME We need to update metrics here, since other reactors don't have access to them.
   837  	if stateSync {
   838  		csMetrics.StateSyncing.Set(1)
   839  	} else if fastSync {
   840  		csMetrics.FastSyncing.Set(1)
   841  	}
   842  
   843  	logger.Info("Setting up maverick consensus reactor", "Misbehaviors", misbehaviors)
   844  	consensusReactor, consensusState := createConsensusReactor(
   845  		config, state, blockExec, blockStore, mempool, evidencePool,
   846  		privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, misbehaviors)
   847  
   848  	// Set up state sync reactor, and schedule a sync if requested.
   849  	// FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy,
   850  	// we should clean this whole thing up. See:
   851  	// https://github.com/vipernet-xyz/tm/issues/4644
   852  	stateSyncReactor := statesync.NewReactor(
   853  		*config.StateSync,
   854  		proxyApp.Snapshot(),
   855  		proxyApp.Query(),
   856  		config.StateSync.TempDir,
   857  	)
   858  	stateSyncReactor.SetLogger(logger.With("module", "statesync"))
   859  
   860  	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
   861  	if err != nil {
   862  		return nil, err
   863  	}
   864  
   865  	// Setup Transport.
   866  	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
   867  
   868  	// Setup Switch.
   869  	p2pLogger := logger.With("module", "p2p")
   870  	sw := createSwitch(
   871  		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
   872  		stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
   873  	)
   874  
   875  	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
   876  	if err != nil {
   877  		return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
   878  	}
   879  
   880  	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   881  	if err != nil {
   882  		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
   883  	}
   884  
   885  	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
   886  	if err != nil {
   887  		return nil, fmt.Errorf("could not create addrbook: %w", err)
   888  	}
   889  
   890  	// Optionally, start the pex reactor
   891  	//
   892  	// TODO:
   893  	//
   894  	// We need to set Seeds and PersistentPeers on the switch,
   895  	// since it needs to be able to use these (and their DNS names)
   896  	// even if the PEX is off. We can include the DNS name in the NetAddress,
   897  	// but it would still be nice to have a clear list of the current "PersistentPeers"
   898  	// somewhere that we can return with net_info.
   899  	//
   900  	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
   901  	// Note we currently use the addrBook regardless at least for AddOurAddress
   902  	var pexReactor *pex.Reactor
   903  	if config.P2P.PexReactor {
   904  		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
   905  	}
   906  
   907  	if config.RPC.PprofListenAddress != "" {
   908  		go func() {
   909  			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
   910  			//nolint:gosec,nolintlint // G114: Use of net/http serve function that has no support for setting timeouts
   911  			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
   912  		}()
   913  	}
   914  
   915  	node := &Node{
   916  		config:        config,
   917  		genesisDoc:    genDoc,
   918  		privValidator: privValidator,
   919  
   920  		transport: transport,
   921  		sw:        sw,
   922  		addrBook:  addrBook,
   923  		nodeInfo:  nodeInfo,
   924  		nodeKey:   nodeKey,
   925  
   926  		stateStore:       stateStore,
   927  		blockStore:       blockStore,
   928  		bcReactor:        bcReactor,
   929  		mempoolReactor:   mempoolReactor,
   930  		mempool:          mempool,
   931  		consensusState:   consensusState,
   932  		consensusReactor: consensusReactor,
   933  		stateSyncReactor: stateSyncReactor,
   934  		stateSync:        stateSync,
   935  		stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
   936  		pexReactor:       pexReactor,
   937  		evidencePool:     evidencePool,
   938  		proxyApp:         proxyApp,
   939  		txIndexer:        txIndexer,
   940  		indexerService:   indexerService,
   941  		blockIndexer:     blockIndexer,
   942  		eventBus:         eventBus,
   943  	}
   944  	node.BaseService = *service.NewBaseService(logger, "Node", node)
   945  
   946  	for _, option := range options {
   947  		option(node)
   948  	}
   949  
   950  	return node, nil
   951  }
   952  
   953  // OnStart starts the Node. It implements service.Service.
   954  func (n *Node) OnStart() error {
   955  	now := tmtime.Now()
   956  	genTime := n.genesisDoc.GenesisTime
   957  	if genTime.After(now) {
   958  		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
   959  		time.Sleep(genTime.Sub(now))
   960  	}
   961  
   962  	// Add private IDs to addrbook to block those peers being added
   963  	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))
   964  
   965  	// Start the RPC server before the P2P server
   966  	// so we can eg. receive txs for the first block
   967  	if n.config.RPC.ListenAddress != "" {
   968  		listeners, err := n.startRPC()
   969  		if err != nil {
   970  			return err
   971  		}
   972  		n.rpcListeners = listeners
   973  	}
   974  
   975  	if n.config.Instrumentation.Prometheus &&
   976  		n.config.Instrumentation.PrometheusListenAddr != "" {
   977  		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
   978  	}
   979  
   980  	// Start the transport.
   981  	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
   982  	if err != nil {
   983  		return err
   984  	}
   985  	if err := n.transport.Listen(*addr); err != nil {
   986  		return err
   987  	}
   988  
   989  	n.isListening = true
   990  
   991  	// Start the switch (the P2P server).
   992  	err = n.sw.Start()
   993  	if err != nil {
   994  		return err
   995  	}
   996  
   997  	// Always connect to persistent peers
   998  	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
   999  	if err != nil {
  1000  		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
  1001  	}
  1002  
  1003  	// Run state sync
  1004  	if n.stateSync {
  1005  		bcR, ok := n.bcReactor.(fastSyncReactor)
  1006  		if !ok {
  1007  			return fmt.Errorf("this blockchain reactor does not support switching from state sync")
  1008  		}
  1009  		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
  1010  			n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
  1011  		if err != nil {
  1012  			return fmt.Errorf("failed to start state sync: %w", err)
  1013  		}
  1014  	}
  1015  
  1016  	return nil
  1017  }
  1018  
// OnStop stops the Node. It implements service.Service.
//
// Teardown mirrors startup in reverse: non-reactor services first (event bus,
// indexer), then the reactors via the switch, then the P2P transport, and
// finally the external-facing pieces (RPC listeners, private validator
// client, Prometheus server). Failures are logged rather than returned so
// every component gets a chance to stop.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	if err := n.eventBus.Stop(); err != nil {
		n.Logger.Error("Error closing eventBus", "err", err)
	}
	if err := n.indexerService.Stop(); err != nil {
		n.Logger.Error("Error closing indexerService", "err", err)
	}

	// now stop the reactors
	if err := n.sw.Stop(); err != nil {
		n.Logger.Error("Error closing switch", "err", err)
	}

	// Close the transport so no new peer connections are accepted.
	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	// If the private validator is itself a service (e.g. a socket client),
	// stop it too; a plain in-process validator is left alone.
	if pvsc, ok := n.privValidator.(service.Service); ok {
		if err := pvsc.Stop(); err != nil {
			n.Logger.Error("Error closing private validator", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}
  1065  
// ConfigureRPC makes sure RPC has all the objects it needs to operate.
// It fetches the validator's public key and installs the node's stores,
// mempool, reactors, event bus, and RPC config into the package-global
// rpccore environment. It must run before any RPC listener starts serving
// (startRPC calls it first).
func (n *Node) ConfigureRPC() error {
	pubKey, err := n.privValidator.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	}
	rpccore.SetEnvironment(&rpccore.Environment{
		ProxyAppQuery:   n.proxyApp.Query(),
		ProxyAppMempool: n.proxyApp.Mempool(),

		StateStore:     n.stateStore,
		BlockStore:     n.blockStore,
		EvidencePool:   n.evidencePool,
		ConsensusState: n.consensusState,
		P2PPeers:       n.sw,
		P2PTransport:   n,

		PubKey:           pubKey,
		GenDoc:           n.genesisDoc,
		TxIndexer:        n.txIndexer,
		BlockIndexer:     n.blockIndexer,
		// NOTE(review): a fresh, empty upstream consensus.Reactor is installed
		// here instead of this node's maverick consensus reactor — presumably
		// to satisfy the environment's expected type; confirm this is intended.
		ConsensusReactor: &consensus.Reactor{},
		EventBus:         n.eventBus,
		Mempool:          n.mempool,

		Logger: n.Logger.With("module", "rpc"),

		Config: *n.config.RPC,
	})
	return nil
}
  1097  
// startRPC configures the rpccore environment and starts an HTTP(S)+websocket
// JSON-RPC server on every address in RPC.ListenAddress (comma-separated; a
// mix of unix and tcp sockets is allowed), plus an optional gRPC server on
// RPC.GRPCListenAddress. Each server runs on its own goroutine; the opened
// listeners are returned so OnStop can close them.
func (n *Node) startRPC() ([]net.Listener, error) {
	err := n.ConfigureRPC()
	if err != nil {
		return nil, err
	}

	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")

	// Unsafe routes (e.g. dial_seeds) are opt-in via config.
	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	config := rpcserver.DefaultConfig()
	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
	// If necessary adjust global WriteTimeout to ensure it's greater than
	// TimeoutBroadcastTxCommit.
	// See https://github.com/vipernet-xyz/tm/issues/3435
	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wmLogger := rpcLogger.With("protocol", "websocket")
		// Drop all of a client's event subscriptions when its websocket
		// connection goes away, so the event bus doesn't leak them.
		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
			rpcserver.OnDisconnect(func(remoteAddr string) {
				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
				}
			}),
			rpcserver.ReadLimit(config.MaxBodyBytes),
		)
		wm.SetLogger(wmLogger)
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
		listener, err := rpcserver.Listen(
			listenAddr,
			config,
		)
		if err != nil {
			return nil, err
		}

		// Optionally wrap the mux with CORS middleware before serving.
		var rootHandler http.Handler = mux
		if n.config.RPC.IsCorsEnabled() {
			corsMiddleware := cors.New(cors.Options{
				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
				AllowedMethods: n.config.RPC.CORSAllowedMethods,
				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
			})
			rootHandler = corsMiddleware.Handler(mux)
		}
		// Serve on a background goroutine; listener/rootHandler/rpcLogger are
		// declared per-iteration, so each closure captures its own copies.
		if n.config.RPC.IsTLSEnabled() {
			go func() {
				if err := rpcserver.ServeTLS(
					listener,
					rootHandler,
					n.config.RPC.CertFile(),
					n.config.RPC.KeyFile(),
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server with TLS", "err", err)
				}
			}()
		} else {
			go func() {
				if err := rpcserver.Serve(
					listener,
					rootHandler,
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server", "err", err)
				}
			}()
		}

		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		// The gRPC server gets its own config: same body/header limits, but
		// its own connection cap.
		config := rpcserver.DefaultConfig()
		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
		// If necessary adjust global WriteTimeout to ensure it's greater than
		// TimeoutBroadcastTxCommit.
		// See https://github.com/vipernet-xyz/tm/issues/3435
		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
		}
		listener, err := rpcserver.Listen(grpcListenAddr, config)
		if err != nil {
			return nil, err
		}
		go func() {
			if err := grpccore.StartGRPCServer(listener); err != nil {
				n.Logger.Error("Error starting gRPC server", "err", err)
			}
		}()
		listeners = append(listeners, listener)
	}

	return listeners, nil
}
  1213  
  1214  // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
  1215  // collectors on addr.
  1216  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1217  	srv := &http.Server{
  1218  		Addr: addr,
  1219  		Handler: promhttp.InstrumentMetricHandler(
  1220  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1221  				prometheus.DefaultGatherer,
  1222  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1223  			),
  1224  		),
  1225  		ReadHeaderTimeout: readHeaderTimeout,
  1226  	}
  1227  	go func() {
  1228  		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
  1229  			// Error starting or closing listener:
  1230  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1231  		}
  1232  	}()
  1233  	return srv
  1234  }
  1235  
// Switch returns the Node's Switch.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}

// BlockStore returns the Node's BlockStore.
func (n *Node) BlockStore() *store.BlockStore {
	return n.blockStore
}

// ConsensusState returns the Node's ConsensusState.
func (n *Node) ConsensusState() *cs.State {
	return n.consensusState
}

// ConsensusReactor returns the Node's ConsensusReactor.
func (n *Node) ConsensusReactor() *cs.Reactor {
	return n.consensusReactor
}

// MempoolReactor returns the Node's mempool reactor.
func (n *Node) MempoolReactor() p2p.Reactor {
	return n.mempoolReactor
}

// Mempool returns the Node's mempool.
func (n *Node) Mempool() mempl.Mempool {
	return n.mempool
}

// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
func (n *Node) PEXReactor() *pex.Reactor {
	return n.pexReactor
}

// EvidencePool returns the Node's EvidencePool.
func (n *Node) EvidencePool() *evidence.Pool {
	return n.evidencePool
}

// EventBus returns the Node's EventBus.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}

// PrivValidator returns the Node's PrivValidator.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}

// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}

// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}

// Config returns the Node's config.
func (n *Node) Config() *cfg.Config {
	return n.config
}

//------------------------------------------------------------------------------

// Listeners returns a human-readable description of the node's P2P listen
// address, formatted from the configured external address.
func (n *Node) Listeners() []string {
	return []string{
		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
	}
}

// IsListening reports whether the P2P transport has been opened (set in
// OnStart, cleared in OnStop).
func (n *Node) IsListening() bool {
	return n.isListening
}

// NodeInfo returns the Node's Info from the Switch.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}
  1318  
  1319  func makeNodeInfo(
  1320  	config *cfg.Config,
  1321  	nodeKey *p2p.NodeKey,
  1322  	txIndexer txindex.TxIndexer,
  1323  	genDoc *types.GenesisDoc,
  1324  	state sm.State,
  1325  ) (p2p.NodeInfo, error) {
  1326  	txIndexerStatus := "on"
  1327  	if _, ok := txIndexer.(*null.TxIndex); ok {
  1328  		txIndexerStatus = "off"
  1329  	}
  1330  
  1331  	var bcChannel byte
  1332  	switch config.FastSync.Version {
  1333  	case "v0":
  1334  		bcChannel = bcv0.BlockchainChannel
  1335  	case "v1":
  1336  		bcChannel = bcv1.BlockchainChannel
  1337  	case "v2":
  1338  		bcChannel = bcv2.BlockchainChannel
  1339  	default:
  1340  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
  1341  	}
  1342  
  1343  	nodeInfo := p2p.DefaultNodeInfo{
  1344  		ProtocolVersion: p2p.NewProtocolVersion(
  1345  			version.P2PProtocol, // global
  1346  			state.Version.Consensus.Block,
  1347  			state.Version.Consensus.App,
  1348  		),
  1349  		DefaultNodeID: nodeKey.ID(),
  1350  		Network:       genDoc.ChainID,
  1351  		Version:       version.TMCoreSemVer,
  1352  		Channels: []byte{
  1353  			bcChannel,
  1354  			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
  1355  			mempl.MempoolChannel,
  1356  			evidence.EvidenceChannel,
  1357  			statesync.SnapshotChannel, statesync.ChunkChannel,
  1358  		},
  1359  		Moniker: config.Moniker,
  1360  		Other: p2p.DefaultNodeInfoOther{
  1361  			TxIndex:    txIndexerStatus,
  1362  			RPCAddress: config.RPC.ListenAddress,
  1363  		},
  1364  	}
  1365  
  1366  	if config.P2P.PexReactor {
  1367  		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
  1368  	}
  1369  
  1370  	lAddr := config.P2P.ExternalAddress
  1371  
  1372  	if lAddr == "" {
  1373  		lAddr = config.P2P.ListenAddress
  1374  	}
  1375  
  1376  	nodeInfo.ListenAddr = lAddr
  1377  
  1378  	err := nodeInfo.Validate()
  1379  	return nodeInfo, err
  1380  }
  1381  
  1382  //------------------------------------------------------------------------------
  1383  
  1384  var genesisDocKey = []byte("genesisDoc")
  1385  
  1386  // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
  1387  // database, or creates one using the given genesisDocProvider and persists the
  1388  // result to the database. On success this also returns the genesis doc loaded
  1389  // through the given provider.
  1390  func LoadStateFromDBOrGenesisDocProvider(
  1391  	stateDB dbm.DB,
  1392  	genesisDocProvider GenesisDocProvider,
  1393  ) (sm.State, *types.GenesisDoc, error) {
  1394  	// Get genesis doc
  1395  	genDoc, err := loadGenesisDoc(stateDB)
  1396  	if err != nil {
  1397  		genDoc, err = genesisDocProvider()
  1398  		if err != nil {
  1399  			return sm.State{}, nil, err
  1400  		}
  1401  		// save genesis doc to prevent a certain class of user errors (e.g. when it
  1402  		// was changed, accidentally or not). Also good for audit trail.
  1403  		saveGenesisDoc(stateDB, genDoc)
  1404  	}
  1405  	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
  1406  		DiscardABCIResponses: false,
  1407  	})
  1408  	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
  1409  	if err != nil {
  1410  		return sm.State{}, nil, err
  1411  	}
  1412  	return state, genDoc, nil
  1413  }
  1414  
  1415  // panics if failed to unmarshal bytes
  1416  func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
  1417  	b, err := db.Get(genesisDocKey)
  1418  	if err != nil {
  1419  		panic(err)
  1420  	}
  1421  	if len(b) == 0 {
  1422  		return nil, errors.New("genesis doc not found")
  1423  	}
  1424  	var genDoc *types.GenesisDoc
  1425  	err = tmjson.Unmarshal(b, &genDoc)
  1426  	if err != nil {
  1427  		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
  1428  	}
  1429  	return genDoc, nil
  1430  }
  1431  
  1432  // panics if failed to marshal the given genesis document
  1433  func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
  1434  	b, err := tmjson.Marshal(genDoc)
  1435  	if err != nil {
  1436  		panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
  1437  	}
  1438  	if err := db.SetSync(genesisDocKey, b); err != nil {
  1439  		panic(fmt.Sprintf("Failed to save genesis doc: %v", err))
  1440  	}
  1441  }
  1442  
  1443  func createAndStartPrivValidatorSocketClient(
  1444  	listenAddr,
  1445  	chainID string,
  1446  	logger log.Logger,
  1447  ) (types.PrivValidator, error) {
  1448  	pve, err := privval.NewSignerListener(listenAddr, logger)
  1449  	if err != nil {
  1450  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1451  	}
  1452  
  1453  	pvsc, err := privval.NewSignerClient(pve, chainID)
  1454  	if err != nil {
  1455  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1456  	}
  1457  
  1458  	// try to get a pubkey from private validate first time
  1459  	_, err = pvsc.GetPubKey()
  1460  	if err != nil {
  1461  		return nil, fmt.Errorf("can't get pubkey: %w", err)
  1462  	}
  1463  
  1464  	const (
  1465  		retries = 50 // 50 * 100ms = 5s total
  1466  		timeout = 100 * time.Millisecond
  1467  	)
  1468  	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
  1469  
  1470  	return pvscWithRetries, nil
  1471  }
  1472  
  1473  // splitAndTrimEmpty slices s into all subslices separated by sep and returns a
  1474  // slice of the string s with all leading and trailing Unicode code points
  1475  // contained in cutset removed. If sep is empty, SplitAndTrim splits after each
  1476  // UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
  1477  // -1.  also filter out empty strings, only return non-empty strings.
  1478  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1479  	if s == "" {
  1480  		return []string{}
  1481  	}
  1482  
  1483  	spl := strings.Split(s, sep)
  1484  	nonEmptyStrings := make([]string, 0, len(spl))
  1485  	for i := 0; i < len(spl); i++ {
  1486  		element := strings.Trim(spl[i], cutset)
  1487  		if element != "" {
  1488  			nonEmptyStrings = append(nonEmptyStrings, element)
  1489  		}
  1490  	}
  1491  	return nonEmptyStrings
  1492  }