github.com/badrootd/nibiru-cometbft@v0.37.5-0.20240307173500-2a75559eee9b/node/node.go

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"net"
     9  	"net/http"
    10  	"os"
    11  	"strings"
    12  	"time"
    13  
    14  	dbm "github.com/badrootd/nibiru-db"
    15  	"github.com/prometheus/client_golang/prometheus"
    16  	"github.com/prometheus/client_golang/prometheus/promhttp"
    17  	"github.com/rs/cors"
    18  
    19  	abci "github.com/badrootd/nibiru-cometbft/abci/types"
    20  	bc "github.com/badrootd/nibiru-cometbft/blocksync"
    21  	cfg "github.com/badrootd/nibiru-cometbft/config"
    22  	cs "github.com/badrootd/nibiru-cometbft/consensus"
    23  	"github.com/badrootd/nibiru-cometbft/crypto"
    24  	"github.com/badrootd/nibiru-cometbft/evidence"
    25  	"github.com/badrootd/nibiru-cometbft/light"
    26  
    27  	cmtjson "github.com/badrootd/nibiru-cometbft/libs/json"
    28  	"github.com/badrootd/nibiru-cometbft/libs/log"
    29  	cmtpubsub "github.com/badrootd/nibiru-cometbft/libs/pubsub"
    30  	"github.com/badrootd/nibiru-cometbft/libs/service"
    31  
    32  	mempl "github.com/badrootd/nibiru-cometbft/mempool"
    33  	mempoolv0 "github.com/badrootd/nibiru-cometbft/mempool/v0"
    34  	mempoolv1 "github.com/badrootd/nibiru-cometbft/mempool/v1" //nolint:staticcheck // SA1019 Priority mempool deprecated but still supported in this release.
    35  	"github.com/badrootd/nibiru-cometbft/p2p"
    36  	"github.com/badrootd/nibiru-cometbft/p2p/pex"
    37  	"github.com/badrootd/nibiru-cometbft/privval"
    38  	"github.com/badrootd/nibiru-cometbft/proxy"
    39  	rpccore "github.com/badrootd/nibiru-cometbft/rpc/core"
    40  	grpccore "github.com/badrootd/nibiru-cometbft/rpc/grpc"
    41  	rpcserver "github.com/badrootd/nibiru-cometbft/rpc/jsonrpc/server"
    42  	sm "github.com/badrootd/nibiru-cometbft/state"
    43  	"github.com/badrootd/nibiru-cometbft/state/indexer"
    44  	blockidxkv "github.com/badrootd/nibiru-cometbft/state/indexer/block/kv"
    45  	blockidxnull "github.com/badrootd/nibiru-cometbft/state/indexer/block/null"
    46  	"github.com/badrootd/nibiru-cometbft/state/indexer/sink/psql"
    47  	"github.com/badrootd/nibiru-cometbft/state/txindex"
    48  	"github.com/badrootd/nibiru-cometbft/state/txindex/kv"
    49  	"github.com/badrootd/nibiru-cometbft/state/txindex/null"
    50  	"github.com/badrootd/nibiru-cometbft/statesync"
    51  	"github.com/badrootd/nibiru-cometbft/store"
    52  	"github.com/badrootd/nibiru-cometbft/types"
    53  	cmttime "github.com/badrootd/nibiru-cometbft/types/time"
    54  	"github.com/badrootd/nibiru-cometbft/version"
    55  
    56  	_ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port
    57  
    58  	_ "github.com/lib/pq" // provide the psql db driver
    59  )
    60  
    61  //------------------------------------------------------------------------------
    62  
    63  // DBContext specifies config information for loading a new DB.
    64  type DBContext struct {
    65  	ID     string
    66  	Config *cfg.Config
    67  }
    68  
    69  // DBProvider takes a DBContext and returns an instantiated DB.
    70  type DBProvider func(*DBContext) (dbm.DB, error)
    71  
    72  const readHeaderTimeout = 10 * time.Second
    73  
    74  // DefaultDBProvider returns a database using the DBBackend and DBDir
    75  // specified in the ctx.Config.
    76  func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
    77  	dbType := dbm.BackendType(ctx.Config.DBBackend)
    78  	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
    79  }
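
        // Illustrative sketch (not part of the original file): a DBProvider that
        // keeps everything in memory, handy for tests. Assumes the nibiru-db fork
        // exposes NewMemDB, as upstream cometbft-db does.
        func memDBProvider(*DBContext) (dbm.DB, error) {
        	return dbm.NewMemDB(), nil
        }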
    80  
    81  // GenesisDocProvider returns a GenesisDoc.
    82  // It allows the GenesisDoc to be pulled from sources other than the
    83  // filesystem, for instance from a distributed key-value store cluster.
    84  type GenesisDocProvider func() (*types.GenesisDoc, error)
    85  
    86  // DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
    87  // the GenesisDoc from the config.GenesisFile() on the filesystem.
    88  func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
    89  	return func() (*types.GenesisDoc, error) {
    90  		return types.GenesisDocFromFile(config.GenesisFile())
    91  	}
    92  }
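
        // Illustrative sketch: a GenesisDocProvider backed by raw JSON bytes, e.g.
        // fetched from a remote key-value store, using types.GenesisDocFromJSON.
        func genesisDocFromJSONProvider(genesisJSON []byte) GenesisDocProvider {
        	return func() (*types.GenesisDoc, error) {
        		return types.GenesisDocFromJSON(genesisJSON)
        	}
        }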
    93  
    94  // Provider takes a config and a logger and returns a ready-to-go Node.
    95  type Provider func(*cfg.Config, log.Logger) (*Node, error)
    96  
    97  // DefaultNewNode returns a CometBFT node with default settings for the
    98  // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
    99  // It implements Provider.
   100  func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
   101  	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
   102  	if err != nil {
   103  		return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
   104  	}
   105  
   106  	return NewNode(config,
   107  		privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
   108  		nodeKey,
   109  		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
   110  		DefaultGenesisDocProviderFunc(config),
   111  		DefaultDBProvider,
   112  		DefaultMetricsProvider(config.Instrumentation),
   113  		logger,
   114  	)
   115  }
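
        // Illustrative usage sketch: boot a node from a home directory previously
        // initialized with `cometbft init`. The root path is an assumption.
        func startDefaultNode() error {
        	config := cfg.DefaultConfig()
        	config.SetRoot(os.ExpandEnv("$HOME/.cometbft")) // hypothetical home dir
        	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

        	node, err := DefaultNewNode(config, logger)
        	if err != nil {
        		return fmt.Errorf("failed to create node: %w", err)
        	}
        	return node.Start()
        }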
   116  
   117  // MetricsProvider returns consensus, p2p, mempool, state, and proxy Metrics.
   118  type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics)
   119  
   120  // DefaultMetricsProvider returns Metrics built using the Prometheus client
   121  // library if Prometheus is enabled. Otherwise, it returns no-op Metrics.
   122  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   123  	return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics) {
   124  		if config.Prometheus {
   125  			return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   126  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   127  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   128  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   129  				proxy.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   130  		}
   131  		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics(), proxy.NopMetrics()
   132  	}
   133  }
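
        // Illustrative sketch: a MetricsProvider that always returns no-op metrics
        // regardless of the instrumentation config, e.g. for tests and benchmarks.
        func nopMetricsProvider() MetricsProvider {
        	return func(string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics) {
        		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics(), proxy.NopMetrics()
        	}
        }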
   134  
   135  // Option sets a parameter for the node.
   136  type Option func(*Node)
   137  
   138  // Temporary interface for switching to block sync; we should get rid of the v0 and v1 reactors.
   139  // See: https://github.com/tendermint/tendermint/issues/4595
   140  type blockSyncReactor interface {
   141  	SwitchToBlockSync(sm.State) error
   142  }
   143  
   144  // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
   145  // the node's Switch.
   146  //
   147  // WARNING: using any name from the below list of existing reactors will
   148  // replace that reactor with the custom one.
   149  //
   150  //   - MEMPOOL
   151  //   - BLOCKCHAIN
   152  //   - CONSENSUS
   153  //   - EVIDENCE
   154  //   - PEX
   155  //   - STATESYNC
   156  func CustomReactors(reactors map[string]p2p.Reactor) Option {
   157  	return func(n *Node) {
   158  		for name, reactor := range reactors {
   159  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   160  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   161  					"name", name, "existing", existingReactor, "custom", reactor)
   162  				n.sw.RemoveReactor(name, existingReactor)
   163  			}
   164  			n.sw.AddReactor(name, reactor)
   165  			// register the new channels to the nodeInfo
   166  			// NOTE: This is a bit messy now with the type casting but is
   167  			// cleaned up in the following version when NodeInfo is changed from
   168  			// an interface to a concrete type
   169  			if ni, ok := n.nodeInfo.(p2p.DefaultNodeInfo); ok {
   170  				for _, chDesc := range reactor.GetChannels() {
   171  					if !ni.HasChannel(chDesc.ID) {
   172  						ni.Channels = append(ni.Channels, chDesc.ID)
   173  						n.transport.AddChannel(chDesc.ID)
   174  					}
   175  				}
   176  				n.nodeInfo = ni
   177  			} else {
   178  				n.Logger.Error("Node info is not of type DefaultNodeInfo. Custom reactor channels cannot be added.")
   179  			}
   180  		}
   181  	}
   182  }
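
        // Illustrative usage sketch: swapping in a custom PEX reactor via the
        // CustomReactors option (pv, genProvider, and myPEXReactor are hypothetical):
        //
        //	node, err := NewNode(config, pv, nodeKey, clientCreator, genProvider,
        //		DefaultDBProvider, DefaultMetricsProvider(config.Instrumentation), logger,
        //		CustomReactors(map[string]p2p.Reactor{"PEX": myPEXReactor}),
        //	)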
   183  
   184  // StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and
   185  // build a State object for bootstrapping the node.
   186  // WARNING: this interface is considered unstable and subject to change.
   187  func StateProvider(stateProvider statesync.StateProvider) Option {
   188  	return func(n *Node) {
   189  		n.stateSyncProvider = stateProvider
   190  	}
   191  }
   192  
   193  // BootstrapState synchronizes the stores with the application after state sync
   194  // has been performed offline. It is expected that the block store and state
   195  // store are empty at the time the function is called.
   196  //
   197  // If the block store is not empty, the function returns an error.
   198  func BootstrapState(ctx context.Context, config *cfg.Config, dbProvider DBProvider, height uint64, appHash []byte) (err error) {
   199  	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
   200  	if ctx == nil {
   201  		ctx = context.Background()
   202  	}
   203  
   204  	if config == nil {
   205  		logger.Info("no config provided, using default configuration")
   206  		config = cfg.DefaultConfig()
   207  	}
   208  
   209  	if dbProvider == nil {
   210  		dbProvider = DefaultDBProvider
   211  	}
   212  	blockStore, stateDB, err := initDBs(config, dbProvider)
   213  	if err != nil {
   214  		return err
   215  	}
   216  
   217  	defer func() {
   218  		if derr := blockStore.Close(); derr != nil {
   219  			logger.Error("Failed to close blockstore", "err", derr)
   220  			// Set the return value
   221  			err = derr
   222  		}
   223  	}()
   225  
   226  	if !blockStore.IsEmpty() {
   227  		return errors.New("blockstore not empty, trying to initialize non-empty state")
   228  	}
   229  
   230  	stateStore := sm.NewBootstrapStore(stateDB, sm.StoreOptions{
   231  		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
   232  	})
   233  
   234  	defer func() {
   235  		if derr := stateStore.Close(); derr != nil {
   236  			logger.Error("Failed to close statestore", "err", derr)
   237  			// Set the return value
   238  			err = derr
   239  		}
   240  	}()
   241  	state, err := stateStore.Load()
   242  	if err != nil {
   243  		return err
   244  	}
   245  
   246  	if !state.IsEmpty() {
   247  		return errors.New("state not empty, trying to initialize non-empty state")
   248  	}
   249  
   250  	genState, _, err := LoadStateFromDBOrGenesisDocProvider(stateDB, DefaultGenesisDocProviderFunc(config))
   251  	if err != nil {
   252  		return err
   253  	}
   254  
   255  	stateProvider, err := statesync.NewLightClientStateProvider(
   256  		ctx,
   257  		genState.ChainID, genState.Version, genState.InitialHeight,
   258  		config.StateSync.RPCServers, light.TrustOptions{
   259  			Period: config.StateSync.TrustPeriod,
   260  			Height: config.StateSync.TrustHeight,
   261  			Hash:   config.StateSync.TrustHashBytes(),
   262  		}, logger.With("module", "light"))
   263  	if err != nil {
   264  		return fmt.Errorf("failed to set up light client state provider: %w", err)
   265  	}
   266  
   267  	state, err = stateProvider.State(ctx, height)
   268  	if err != nil {
   269  		return err
   270  	}
   271  	if appHash == nil {
   272  		logger.Info("warning: cannot verify appHash. Verification will happen when the node boots up!")
   273  	} else {
   274  		if !bytes.Equal(appHash, state.AppHash) {
   275  			if err := blockStore.Close(); err != nil {
   276  				logger.Error("failed to close blockstore", "err", err)
   277  			}
   278  			if err := stateStore.Close(); err != nil {
   279  				logger.Error("failed to close statestore", "err", err)
   280  			}
   281  			return fmt.Errorf("the app hash returned by the light client does not match the provided appHash: expected %X, got %X", state.AppHash, appHash)
   282  		}
   283  	}
   284  
   285  	commit, err := stateProvider.Commit(ctx, height)
   286  	if err != nil {
   287  		return err
   288  	}
   289  
   290  	if err = stateStore.Bootstrap(state); err != nil {
   291  		return err
   292  	}
   293  
   294  	err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
   295  	if err != nil {
   296  		return err
   297  	}
   298  
   299  	// Once the stores are bootstrapped, we need to set the height at which the node has finished
   300  	// statesyncing. This will allow the blocksync reactor to fetch blocks at a proper height.
   301  	// In case this operation fails, it is equivalent to a failure in online state sync, where the operator
   302  	// needs to manually delete the state and blockstores and rerun the bootstrapping process.
   303  	err = stateStore.SetOfflineStateSyncHeight(state.LastBlockHeight)
   304  	if err != nil {
   305  		return fmt.Errorf("failed to set synced height: %w", err)
   306  	}
   307  
   308  	return err
   309  }
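
        // Illustrative usage sketch: bootstrapping the stores after restoring a
        // snapshot out of band. The height and trustedAppHash values are
        // hypothetical placeholders that must come from a trusted source:
        //
        //	err := BootstrapState(context.Background(), config, DefaultDBProvider,
        //		1234567, trustedAppHash)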
   310  
   311  //------------------------------------------------------------------------------
   312  
   313  // Node is the highest level interface to a full CometBFT node.
   314  // It includes all configuration information and running services.
   315  type Node struct {
   316  	service.BaseService
   317  
   318  	// config
   319  	config        *cfg.Config
   320  	genesisDoc    *types.GenesisDoc   // initial validator set
   321  	privValidator types.PrivValidator // local node's validator key
   322  
   323  	// network
   324  	transport   *p2p.MultiplexTransport
   325  	sw          *p2p.Switch  // p2p connections
   326  	addrBook    pex.AddrBook // known peers
   327  	nodeInfo    p2p.NodeInfo
   328  	nodeKey     *p2p.NodeKey // our node privkey
   329  	isListening bool
   330  
   331  	// services
   332  	eventBus          *types.EventBus // pub/sub for services
   333  	stateStore        sm.Store
   334  	blockStore        *store.BlockStore // store the blockchain to disk
   335  	bcReactor         p2p.Reactor       // for block-syncing
   336  	mempoolReactor    p2p.Reactor       // for gossiping transactions
   337  	mempool           mempl.Mempool
   338  	stateSync         bool                    // whether the node should state sync on startup
   339  	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
   340  	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
   341  	stateSyncGenesis  sm.State                // provides the genesis state for state sync
   342  	consensusState    *cs.State               // latest consensus state
   343  	consensusReactor  *cs.Reactor             // for participating in the consensus
   344  	pexReactor        *pex.Reactor            // for exchanging peer addresses
   345  	evidencePool      *evidence.Pool          // tracking evidence
   346  	proxyApp          proxy.AppConns          // connection to the application
   347  	rpcListeners      []net.Listener          // rpc servers
   348  	txIndexer         txindex.TxIndexer
   349  	blockIndexer      indexer.BlockIndexer
   350  	indexerService    *txindex.IndexerService
   351  	prometheusSrv     *http.Server
   352  }
   353  
   354  func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
   355  	var blockStoreDB dbm.DB
   356  	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
   357  	if err != nil {
   358  		return
   359  	}
   360  	blockStore = store.NewBlockStore(blockStoreDB)
   361  
   362  	stateDB, err = dbProvider(&DBContext{"state", config})
   363  	if err != nil {
   364  		return
   365  	}
   366  
   367  	return
   368  }
   369  
   370  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) {
   371  	proxyApp := proxy.NewAppConns(clientCreator, metrics)
   372  	proxyApp.SetLogger(logger.With("module", "proxy"))
   373  	if err := proxyApp.Start(); err != nil {
   374  		return nil, fmt.Errorf("error starting proxy app connections: %w", err)
   375  	}
   376  	return proxyApp, nil
   377  }
   378  
   379  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   380  	eventBus := types.NewEventBus()
   381  	eventBus.SetLogger(logger.With("module", "events"))
   382  	if err := eventBus.Start(); err != nil {
   383  		return nil, err
   384  	}
   385  	return eventBus, nil
   386  }
   387  
   388  func createAndStartIndexerService(
   389  	config *cfg.Config,
   390  	chainID string,
   391  	dbProvider DBProvider,
   392  	eventBus *types.EventBus,
   393  	logger log.Logger,
   394  ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
   395  	var (
   396  		txIndexer    txindex.TxIndexer
   397  		blockIndexer indexer.BlockIndexer
   398  	)
   399  
   400  	switch config.TxIndex.Indexer {
   401  	case "kv":
   402  		store, err := dbProvider(&DBContext{"tx_index", config})
   403  		if err != nil {
   404  			return nil, nil, nil, err
   405  		}
   406  
   407  		txIndexer = kv.NewTxIndex(store)
   408  		blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
   409  
   410  	case "psql":
   411  		if config.TxIndex.PsqlConn == "" {
   412  			return nil, nil, nil, errors.New(`no psql-conn is set for the "psql" indexer`)
   413  		}
   414  		es, err := psql.NewEventSink(config.TxIndex.PsqlConn, chainID)
   415  		if err != nil {
   416  			return nil, nil, nil, fmt.Errorf("creating psql indexer: %w", err)
   417  		}
   418  		txIndexer = es.TxIndexer()
   419  		blockIndexer = es.BlockIndexer()
   420  
   421  	default:
   422  		txIndexer = &null.TxIndex{}
   423  		blockIndexer = &blockidxnull.BlockerIndexer{}
   424  	}
   425  
   426  	indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false)
   427  	indexerService.SetLogger(logger.With("module", "txindex"))
   428  
   429  	if err := indexerService.Start(); err != nil {
   430  		return nil, nil, nil, err
   431  	}
   432  
   433  	return indexerService, txIndexer, blockIndexer, nil
   434  }
   435  
   436  func doHandshake(
   437  	ctx context.Context,
   438  	stateStore sm.Store,
   439  	state sm.State,
   440  	blockStore sm.BlockStore,
   441  	genDoc *types.GenesisDoc,
   442  	eventBus types.BlockEventPublisher,
   443  	proxyApp proxy.AppConns,
   444  	consensusLogger log.Logger,
   445  ) error {
   446  	handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
   447  	handshaker.SetLogger(consensusLogger)
   448  	handshaker.SetEventBus(eventBus)
   449  	if err := handshaker.HandshakeWithContext(ctx, proxyApp); err != nil {
   450  		return fmt.Errorf("error during handshake: %w", err)
   451  	}
   452  	return nil
   453  }
   454  
   455  func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
   456  	// Log the version info.
   457  	logger.Info("Version info",
   458  		"tendermint_version", version.TMCoreSemVer,
   459  		"abci", version.ABCISemVer,
   460  		"block", version.BlockProtocol,
   461  		"p2p", version.P2PProtocol,
   462  		"commit_hash", version.TMGitCommitHash,
   463  	)
   464  
   465  	// If the state and software differ in block version, at least log it.
   466  	if state.Version.Consensus.Block != version.BlockProtocol {
   467  		logger.Info("Software and state have different block protocols",
   468  			"software", version.BlockProtocol,
   469  			"state", state.Version.Consensus.Block,
   470  		)
   471  	}
   472  
   473  	addr := pubKey.Address()
   474  	// Log whether this node is a validator or an observer
   475  	if state.Validators.HasAddress(addr) {
   476  		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   477  	} else {
   478  		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   479  	}
   480  }
   481  
   482  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   483  	if state.Validators.Size() > 1 {
   484  		return false
   485  	}
   486  	addr, _ := state.Validators.GetByIndex(0)
   487  	return bytes.Equal(pubKey.Address(), addr)
   488  }
   489  
   490  func createMempoolAndMempoolReactor(
   491  	config *cfg.Config,
   492  	proxyApp proxy.AppConns,
   493  	state sm.State,
   494  	memplMetrics *mempl.Metrics,
   495  	logger log.Logger,
   496  ) (mempl.Mempool, p2p.Reactor) {
   497  	switch config.Mempool.Type {
   498  	// allow empty string for backward compatibility
   499  	case cfg.MempoolTypeFlood, "":
   500  		switch config.Mempool.Version {
   501  		case cfg.MempoolV1:
   502  			mp := mempoolv1.NewTxMempool(
   503  				logger,
   504  				config.Mempool,
   505  				proxyApp.Mempool(),
   506  				state.LastBlockHeight,
   507  				mempoolv1.WithMetrics(memplMetrics),
   508  				mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
   509  				mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
   510  			)
   511  
   512  			reactor := mempoolv1.NewReactor(
   513  				config.Mempool,
   514  				mp,
   515  			)
   516  			if config.Consensus.WaitForTxs() {
   517  				mp.EnableTxsAvailable()
   518  			}
   519  
   520  			return mp, reactor
   521  
   522  		case cfg.MempoolV0:
   523  			mp := mempoolv0.NewCListMempool(
   524  				config.Mempool,
   525  				proxyApp.Mempool(),
   526  				state.LastBlockHeight,
   527  				mempoolv0.WithMetrics(memplMetrics),
   528  				mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
   529  				mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
   530  			)
   531  
   532  			mp.SetLogger(logger)
   533  
   534  			reactor := mempoolv0.NewReactor(
   535  				config.Mempool,
   536  				mp,
   537  			)
   538  			if config.Consensus.WaitForTxs() {
   539  				mp.EnableTxsAvailable()
   540  			}
   541  
   542  			return mp, reactor
   543  
   544  		default:
   545  			return nil, nil
   546  		}
   547  	case cfg.MempoolTypeNop:
   548  		// Strictly speaking, there's no need to have a `mempl.NopMempoolReactor`, but
   549  		// adding it leads to cleaner code.
   550  		return &mempl.NopMempool{}, mempl.NewNopMempoolReactor()
   551  	default:
   552  		panic(fmt.Sprintf("unknown mempool type: %q", config.Mempool.Type))
   553  	}
   554  }
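
        // Illustrative config sketch: the mempool selection above is driven by
        // config.toml, assuming its keys mirror the cfg.MempoolType* and
        // cfg.MempoolV* constants used here:
        //
        //	[mempool]
        //	type = "flood" # or "nop" to disable the mempool entirely
        //	version = "v0" # or "v1" for the deprecated priority mempool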
   555  
   556  func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   557  	stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger,
   558  ) (*evidence.Reactor, *evidence.Pool, error) {
   559  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   560  	if err != nil {
   561  		return nil, nil, err
   562  	}
   563  	evidenceLogger := logger.With("module", "evidence")
   564  	evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB, sm.StoreOptions{
   565  		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
   566  	}), blockStore)
   567  	if err != nil {
   568  		return nil, nil, err
   569  	}
   570  	evidenceReactor := evidence.NewReactor(evidencePool)
   571  	evidenceReactor.SetLogger(evidenceLogger)
   572  	return evidenceReactor, evidencePool, nil
   573  }
   574  
   575  func createBlockchainReactor(config *cfg.Config,
   576  	state sm.State,
   577  	blockExec *sm.BlockExecutor,
   578  	blockStore *store.BlockStore,
   579  	blockSync bool,
   580  	logger log.Logger,
   581  	offlineStateSyncHeight int64,
   582  ) (bcReactor p2p.Reactor, err error) {
   583  	switch config.BlockSync.Version {
   584  	case "v0":
   585  		bcReactor = bc.NewReactorWithOfflineStateSync(state.Copy(), blockExec, blockStore, blockSync, offlineStateSyncHeight)
   586  	case "v1", "v2":
   587  		return nil, fmt.Errorf("block sync version %s has been deprecated. Please use v0", config.BlockSync.Version)
   588  	default:
   589  		return nil, fmt.Errorf("unknown block sync version %s", config.BlockSync.Version)
   590  	}
   591  
   592  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   593  	return bcReactor, nil
   594  }
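
        // Illustrative config sketch: only block sync "v0" is accepted above, so
        // the corresponding config.toml entry would be:
        //
        //	[blocksync]
        //	version = "v0"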
   595  
   596  func createConsensusReactor(config *cfg.Config,
   597  	state sm.State,
   598  	blockExec *sm.BlockExecutor,
   599  	blockStore sm.BlockStore,
   600  	mempool mempl.Mempool,
   601  	evidencePool *evidence.Pool,
   602  	privValidator types.PrivValidator,
   603  	csMetrics *cs.Metrics,
   604  	waitSync bool,
   605  	eventBus *types.EventBus,
   606  	consensusLogger log.Logger,
   607  ) (*cs.Reactor, *cs.State) {
   608  	consensusState := cs.NewState(
   609  		config.Consensus,
   610  		state.Copy(),
   611  		blockExec,
   612  		blockStore,
   613  		mempool,
   614  		evidencePool,
   615  		cs.StateMetrics(csMetrics),
   616  	)
   617  	consensusState.SetLogger(consensusLogger)
   618  	if privValidator != nil {
   619  		consensusState.SetPrivValidator(privValidator)
   620  	}
   621  	consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
   622  	consensusReactor.SetLogger(consensusLogger)
   623  	// services which will be publishing and/or subscribing for messages (events)
   624  	// consensusReactor will set it on consensusState and blockExecutor
   625  	consensusReactor.SetEventBus(eventBus)
   626  	return consensusReactor, consensusState
   627  }
   628  
   629  func createTransport(
   630  	config *cfg.Config,
   631  	nodeInfo p2p.NodeInfo,
   632  	nodeKey *p2p.NodeKey,
   633  	proxyApp proxy.AppConns,
   634  ) (
   635  	*p2p.MultiplexTransport,
   636  	[]p2p.PeerFilterFunc,
   637  ) {
   638  	var (
   639  		mConnConfig = p2p.MConnConfig(config.P2P)
   640  		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
   641  		connFilters = []p2p.ConnFilterFunc{}
   642  		peerFilters = []p2p.PeerFilterFunc{}
   643  	)
   644  
   645  	if !config.P2P.AllowDuplicateIP {
   646  		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
   647  	}
   648  
   649  	// Filter peers by addr or pubkey with an ABCI query.
   650  	// If the query return code is OK, add the peer.
   651  	if config.FilterPeers {
   652  		connFilters = append(
   653  			connFilters,
   654  			// ABCI query for address filtering.
   655  			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
   656  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   657  					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
   658  				})
   659  				if err != nil {
   660  					return err
   661  				}
   662  				if res.IsErr() {
   663  					return fmt.Errorf("error querying abci app: %v", res)
   664  				}
   665  
   666  				return nil
   667  			},
   668  		)
   669  
   670  		peerFilters = append(
   671  			peerFilters,
   672  			// ABCI query for ID filtering.
   673  			func(_ p2p.IPeerSet, p p2p.Peer) error {
   674  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   675  					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
   676  				})
   677  				if err != nil {
   678  					return err
   679  				}
   680  				if res.IsErr() {
   681  					return fmt.Errorf("error querying abci app: %v", res)
   682  				}
   683  
   684  				return nil
   685  			},
   686  		)
   687  	}
   688  
   689  	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
   690  
   691  	// Limit the number of incoming connections.
   692  	maxInboundConns := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   693  	p2p.MultiplexTransportMaxIncomingConnections(maxInboundConns)(transport)
   694  
   695  	return transport, peerFilters
   696  }
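
        // With FilterPeers enabled, the ABCI application receives Query requests
        // at the paths built above and admits a peer by returning a response with
        // code OK; any other code (or a query error) rejects the connection:
        //
        //	/p2p/filter/addr/<ip:port>
        //	/p2p/filter/id/<node-id>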
   697  
   698  func createSwitch(config *cfg.Config,
   699  	transport p2p.Transport,
   700  	p2pMetrics *p2p.Metrics,
   701  	peerFilters []p2p.PeerFilterFunc,
   702  	mempoolReactor p2p.Reactor,
   703  	bcReactor p2p.Reactor,
   704  	stateSyncReactor *statesync.Reactor,
   705  	consensusReactor *cs.Reactor,
   706  	evidenceReactor *evidence.Reactor,
   707  	nodeInfo p2p.NodeInfo,
   708  	nodeKey *p2p.NodeKey,
   709  	p2pLogger log.Logger,
   710  ) *p2p.Switch {
   711  	sw := p2p.NewSwitch(
   712  		config.P2P,
   713  		transport,
   714  		p2p.WithMetrics(p2pMetrics),
   715  		p2p.SwitchPeerFilters(peerFilters...),
   716  	)
   717  	sw.SetLogger(p2pLogger)
   718  	if config.Mempool.Type != cfg.MempoolTypeNop {
   719  		sw.AddReactor("MEMPOOL", mempoolReactor)
   720  	}
   721  	sw.AddReactor("BLOCKCHAIN", bcReactor)
   722  	sw.AddReactor("CONSENSUS", consensusReactor)
   723  	sw.AddReactor("EVIDENCE", evidenceReactor)
   724  	sw.AddReactor("STATESYNC", stateSyncReactor)
   725  
   726  	sw.SetNodeInfo(nodeInfo)
   727  	sw.SetNodeKey(nodeKey)
   728  
   729  	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
   730  	return sw
   731  }
   732  
   733  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   734  	p2pLogger log.Logger, nodeKey *p2p.NodeKey,
   735  ) (pex.AddrBook, error) {
   736  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   737  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   738  
   739  	// Add ourselves to addrbook to prevent dialing ourselves
   740  	if config.P2P.ExternalAddress != "" {
   741  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
   742  		if err != nil {
   743  			return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
   744  		}
   745  		addrBook.AddOurAddress(addr)
   746  	}
   747  	if config.P2P.ListenAddress != "" {
   748  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
   749  		if err != nil {
   750  			return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
   751  		}
   752  		addrBook.AddOurAddress(addr)
   753  	}
   754  
   755  	sw.SetAddrBook(addrBook)
   756  
   757  	return addrBook, nil
   758  }
   759  
   760  func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
   761  	sw *p2p.Switch, logger log.Logger,
   762  ) *pex.Reactor {
   763  	// TODO: persistent peers? so we can have their DNS addrs saved
   764  	pexReactor := pex.NewReactor(addrBook,
   765  		&pex.ReactorConfig{
   766  			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
   767  			SeedMode: config.P2P.SeedMode,
   768  			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
   769  			// blocks assuming 10s blocks ~ 28 hours.
   770  			// TODO (melekes): make it dynamic based on the actual block latencies
   771  			// from the live network.
   772  			// https://github.com/tendermint/tendermint/issues/3523
   773  			SeedDisconnectWaitPeriod:     28 * time.Hour,
   774  			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
   775  		})
   776  	pexReactor.SetLogger(logger.With("module", "pex"))
   777  	sw.AddReactor("PEX", pexReactor)
   778  	return pexReactor
   779  }
   780  
   781  // startStateSync starts an asynchronous state sync process, then switches to block sync mode (or straight to consensus if block sync is disabled).
   782  func startStateSync(ssR *statesync.Reactor, bcR blockSyncReactor, conR *cs.Reactor,
   783  	stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, blockSync bool,
   784  	stateStore sm.Store, blockStore *store.BlockStore, state sm.State,
   785  ) error {
   786  	ssR.Logger.Info("Starting state sync")
   787  
   788  	if stateProvider == nil {
   789  		var err error
   790  		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
   791  		defer cancel()
   792  		stateProvider, err = statesync.NewLightClientStateProvider(
   793  			ctx,
   794  			state.ChainID, state.Version, state.InitialHeight,
   795  			config.RPCServers, light.TrustOptions{
   796  				Period: config.TrustPeriod,
   797  				Height: config.TrustHeight,
   798  				Hash:   config.TrustHashBytes(),
   799  			}, ssR.Logger.With("module", "light"))
   800  		if err != nil {
   801  			return fmt.Errorf("failed to set up light client state provider: %w", err)
   802  		}
   803  	}
   804  
   805  	go func() {
   806  		state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime)
   807  		if err != nil {
   808  			ssR.Logger.Error("State sync failed", "err", err)
   809  			return
   810  		}
   811  		err = stateStore.Bootstrap(state)
   812  		if err != nil {
   813  			ssR.Logger.Error("Failed to bootstrap node with new state", "err", err)
   814  			return
   815  		}
   816  		err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
   817  		if err != nil {
   818  			ssR.Logger.Error("Failed to store last seen commit", "err", err)
   819  			return
   820  		}
   821  
   822  		if blockSync {
   823  			// FIXME Very ugly to have these metrics bleed through here.
   824  			conR.Metrics.StateSyncing.Set(0)
   825  			conR.Metrics.BlockSyncing.Set(1)
   826  			err = bcR.SwitchToBlockSync(state)
   827  			if err != nil {
   828  				ssR.Logger.Error("Failed to switch to block sync", "err", err)
   829  				return
   830  			}
   831  		} else {
   832  			conR.SwitchToConsensus(state, true)
   833  		}
   834  	}()
   835  	return nil
   836  }
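
        // Illustrative config sketch: enabling state sync in config.toml (the
        // trust parameters below are placeholders; obtain them from a trusted RPC
        // endpoint):
        //
        //	[statesync]
        //	enable = true
        //	rpc_servers = "host1:26657,host2:26657"
        //	trust_height = 1000000
        //	trust_hash = "<block hash at trust_height>"
        //	trust_period = "168h0m0s"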
   837  
   838  // NewNode returns a new, ready-to-go CometBFT Node.
   839  func NewNode(config *cfg.Config,
   840  	privValidator types.PrivValidator,
   841  	nodeKey *p2p.NodeKey,
   842  	clientCreator proxy.ClientCreator,
   843  	genesisDocProvider GenesisDocProvider,
   844  	dbProvider DBProvider,
   845  	metricsProvider MetricsProvider,
   846  	logger log.Logger,
   847  	options ...Option,
   848  ) (*Node, error) {
   849  	return NewNodeWithContext(context.TODO(), config, privValidator,
   850  		nodeKey, clientCreator, genesisDocProvider, dbProvider,
   851  		metricsProvider, logger, options...)
   852  }
   853  
   854  func NewNodeWithContext(ctx context.Context,
   855  	config *cfg.Config,
   856  	privValidator types.PrivValidator,
   857  	nodeKey *p2p.NodeKey,
   858  	clientCreator proxy.ClientCreator,
   859  	genesisDocProvider GenesisDocProvider,
   860  	dbProvider DBProvider,
   861  	metricsProvider MetricsProvider,
   862  	logger log.Logger,
   863  	options ...Option,
   864  ) (*Node, error) {
   865  
   866  	blockStore, stateDB, err := initDBs(config, dbProvider)
   867  	if err != nil {
   868  		return nil, err
   869  	}
   870  
   871  	stateStore := sm.NewBootstrapStore(stateDB, sm.StoreOptions{
   872  		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
   873  	})
   874  
   875  	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
   876  	if err != nil {
   877  		return nil, err
   878  	}
   879  
   880  	csMetrics, p2pMetrics, memplMetrics, smMetrics, abciMetrics := metricsProvider(genDoc.ChainID)
   881  
   882  	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
   883  	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, abciMetrics)
   884  	if err != nil {
   885  		return nil, err
   886  	}
   887  
   888  	// EventBus and IndexerService must be started before the handshake because
   889  	// we might need to index the txs of the replayed block as this might not have happened
   890  	// when the node stopped last time (i.e. the node stopped after it saved the block
   891  	// but before it indexed the txs, or the endblocker panicked)
   892  	eventBus, err := createAndStartEventBus(logger)
   893  	if err != nil {
   894  		return nil, err
   895  	}
   896  
   897  	indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(config,
   898  		genDoc.ChainID, dbProvider, eventBus, logger)
   899  	if err != nil {
   900  		return nil, err
   901  	}
   902  
   903  	// If an address is provided, listen on the socket for a connection from an
   904  	// external signing process.
   905  	if config.PrivValidatorListenAddr != "" {
   906  		// FIXME: we should start services inside OnStart
   907  		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
   908  		if err != nil {
   909  			return nil, fmt.Errorf("error with private validator socket client: %w", err)
   910  		}
   911  	}
   912  
   913  	pubKey, err := privValidator.GetPubKey()
   914  	if err != nil {
   915  		return nil, fmt.Errorf("can't get pubkey: %w", err)
   916  	}
   917  
   918  	// Determine whether we should attempt state sync.
   919  	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
   920  	if stateSync && state.LastBlockHeight > 0 {
   921  		logger.Info("Found local state with non-zero height, skipping state sync")
   922  		stateSync = false
   923  	}
   924  
   925  	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
   926  	// and replays any blocks as necessary to sync CometBFT with the app.
   927  	consensusLogger := logger.With("module", "consensus")
   928  	if !stateSync {
   929  		if err := doHandshake(ctx, stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
   930  			return nil, err
   931  		}
   932  
   933  		// Reload the state. It will have the Version.Consensus.App set by the
   934  		// Handshake, and may have other modifications as well (ie. depending on
   935  		// what happened during block replay).
   936  		state, err = stateStore.Load()
   937  		if err != nil {
   938  			return nil, fmt.Errorf("cannot load state: %w", err)
   939  		}
   940  	}
   941  
   942  	// Determine whether we should do block sync. This must happen after the handshake, since the
   943  	// app may modify the validator set, specifying ourselves as the only validator.
   944  	blockSync := config.BlockSyncMode && !onlyValidatorIsUs(state, pubKey)
   945  
   946  	logNodeStartupInfo(state, pubKey, logger, consensusLogger)
   947  
   948  	mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
   949  
   950  	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
   951  	if err != nil {
   953  		return nil, err
   954  	}
   955  
   956  	// make block executor for consensus and blockchain reactors to execute blocks
   957  	blockExec := sm.NewBlockExecutor(
   958  		stateStore,
   959  		logger.With("module", "state"),
   960  		proxyApp.Consensus(),
   961  		mempool,
   962  		evidencePool,
   963  		sm.BlockExecutorWithMetrics(smMetrics),
   964  	)
   965  	offlineStateSyncHeight := int64(0)
   966  	if blockStore.Height() == 0 {
   967  		offlineStateSyncHeight, err = stateStore.GetOfflineStateSyncHeight()
   968  		if err != nil && err.Error() != "value empty" {
   969  			panic(fmt.Sprintf("failed to retrieve statesynced height from store: %s; expected state store height to be %v", err, state.LastBlockHeight))
   970  		}
   971  	}
   972  	// Make BlockchainReactor. Don't start block sync if we're doing a state sync first.
   973  	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger, offlineStateSyncHeight)
   974  	if err != nil {
   975  		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
   976  	}
   977  
   978  	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first.
   979  	// FIXME We need to update metrics here, since other reactors don't have access to them.
   980  	if stateSync {
   981  		csMetrics.StateSyncing.Set(1)
   982  	} else if blockSync {
   983  		csMetrics.BlockSyncing.Set(1)
   984  	}
   985  
   986  	consensusReactor, consensusState := createConsensusReactor(
   987  		config, state, blockExec, blockStore, mempool, evidencePool,
   988  		privValidator, csMetrics, stateSync || blockSync, eventBus, consensusLogger,
   989  	)
   990  	err = stateStore.SetOfflineStateSyncHeight(0)
   991  	if err != nil {
   992  		panic(fmt.Sprintf("failed to reset the offline state sync height: %s", err))
   993  	}
   994  
   995  	// Set up state sync reactor, and schedule a sync if requested.
   996  	// FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy,
   997  	// we should clean this whole thing up. See:
   998  	// https://github.com/tendermint/tendermint/issues/4644
   999  	stateSyncReactor := statesync.NewReactor(
  1000  		*config.StateSync,
  1001  		proxyApp.Snapshot(),
  1002  		proxyApp.Query(),
  1003  		config.StateSync.TempDir,
  1004  	)
  1005  	stateSyncReactor.SetLogger(logger.With("module", "statesync"))
  1006  
  1007  	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
  1008  	if err != nil {
  1009  		return nil, err
  1010  	}
  1011  
  1012  	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
  1013  
  1014  	p2pLogger := logger.With("module", "p2p")
  1015  	sw := createSwitch(
  1016  		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
  1017  		stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
  1018  	)
  1019  
  1020  	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
  1021  	if err != nil {
  1022  		return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
  1023  	}
  1024  
  1025  	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
  1026  	if err != nil {
  1027  		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
  1028  	}
  1029  
  1030  	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
  1031  	if err != nil {
  1032  		return nil, fmt.Errorf("could not create addrbook: %w", err)
  1033  	}
  1034  
  1035  	// Optionally, start the pex reactor
  1036  	//
  1037  	// TODO:
  1038  	//
  1039  	// We need to set Seeds and PersistentPeers on the switch,
  1040  	// since it needs to be able to use these (and their DNS names)
  1041  	// even if the PEX is off. We can include the DNS name in the NetAddress,
  1042  	// but it would still be nice to have a clear list of the current "PersistentPeers"
  1043  	// somewhere that we can return with net_info.
  1044  	//
  1045  	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
  1046  	// Note we currently use the addrBook regardless at least for AddOurAddress
  1047  	var pexReactor *pex.Reactor
  1048  	if config.P2P.PexReactor {
  1049  		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
  1050  	}
  1051  
  1052  	if config.RPC.PprofListenAddress != "" {
  1053  		go func() {
  1054  			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
  1055  			//nolint:gosec,nolintlint // G114: Use of net/http serve function that has no support for setting timeouts
  1056  			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
  1057  		}()
  1058  	}
  1059  
  1060  	node := &Node{
  1061  		config:        config,
  1062  		genesisDoc:    genDoc,
  1063  		privValidator: privValidator,
  1064  
  1065  		transport: transport,
  1066  		sw:        sw,
  1067  		addrBook:  addrBook,
  1068  		nodeInfo:  nodeInfo,
  1069  		nodeKey:   nodeKey,
  1070  
  1071  		stateStore:       stateStore,
  1072  		blockStore:       blockStore,
  1073  		bcReactor:        bcReactor,
  1074  		mempoolReactor:   mempoolReactor,
  1075  		mempool:          mempool,
  1076  		consensusState:   consensusState,
  1077  		consensusReactor: consensusReactor,
  1078  		stateSyncReactor: stateSyncReactor,
  1079  		stateSync:        stateSync,
  1080  		stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
  1081  		pexReactor:       pexReactor,
  1082  		evidencePool:     evidencePool,
  1083  		proxyApp:         proxyApp,
  1084  		txIndexer:        txIndexer,
  1085  		indexerService:   indexerService,
  1086  		blockIndexer:     blockIndexer,
  1087  		eventBus:         eventBus,
  1088  	}
  1089  	node.BaseService = *service.NewBaseService(logger, "Node", node)
  1090  
  1091  	for _, option := range options {
  1092  		option(node)
  1093  	}
  1094  
  1095  	return node, nil
  1096  }
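
        // Illustrative usage sketch: NewNodeWithContext with a caller-supplied
        // context bounding the handshake, plus a custom state sync provider
        // (pv, genProvider, and myStateProvider are hypothetical):
        //
        //	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
        //	defer cancel()
        //	node, err := NewNodeWithContext(ctx, config, pv, nodeKey, clientCreator,
        //		genProvider, DefaultDBProvider, metricsProvider, logger,
        //		StateProvider(myStateProvider),
        //	)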
  1097  
  1098  // OnStart starts the Node. It implements service.Service.
  1099  func (n *Node) OnStart() error {
  1100  	now := cmttime.Now()
  1101  	genTime := n.genesisDoc.GenesisTime
  1102  	if genTime.After(now) {
  1103  		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
  1104  		time.Sleep(genTime.Sub(now))
  1105  	}
  1106  
  1107  	// Add private IDs to addrbook to block those peers being added
  1108  	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))
  1109  
  1110  	// Start the RPC server before the P2P server
  1111  	// so we can e.g. receive txs for the first block
  1112  	if n.config.RPC.ListenAddress != "" {
  1113  		listeners, err := n.startRPC()
  1114  		if err != nil {
  1115  			return err
  1116  		}
  1117  		n.rpcListeners = listeners
  1118  	}
  1119  
  1120  	if n.config.Instrumentation.Prometheus &&
  1121  		n.config.Instrumentation.PrometheusListenAddr != "" {
  1122  		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
  1123  	}
  1124  
  1125  	// Start the transport.
  1126  	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
  1127  	if err != nil {
  1128  		return err
  1129  	}
  1130  	if err := n.transport.Listen(*addr); err != nil {
  1131  		return err
  1132  	}
  1133  
  1134  	n.isListening = true
  1135  
  1136  	// Start the switch (the P2P server).
  1137  	err = n.sw.Start()
  1138  	if err != nil {
  1139  		return err
  1140  	}
  1141  
  1142  	// Always connect to persistent peers
  1143  	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
  1144  	if err != nil {
  1145  		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
  1146  	}
  1147  
  1148  	// Run state sync
  1149  	if n.stateSync {
  1150  		bcR, ok := n.bcReactor.(blockSyncReactor)
  1151  		if !ok {
  1152  			return errors.New("this blockchain reactor does not support switching from state sync")
  1153  		}
  1154  		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
  1155  			n.config.StateSync, n.config.BlockSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
  1156  		if err != nil {
  1157  			return fmt.Errorf("failed to start state sync: %w", err)
  1158  		}
  1159  	}
  1160  
  1161  	return nil
  1162  }
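
        // Illustrative lifecycle sketch: run a started node until the process is
        // interrupted (assumes os/signal is imported at the call site):
        //
        //	if err := node.Start(); err != nil {
        //		return err
        //	}
        //	defer func() { _ = node.Stop() }()
        //	c := make(chan os.Signal, 1)
        //	signal.Notify(c, os.Interrupt)
        //	<-c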
  1163  
  1164  // OnStop stops the Node. It implements service.Service.
  1165  func (n *Node) OnStop() {
  1166  	n.BaseService.OnStop()
  1167  
  1168  	n.Logger.Info("Stopping Node")
  1169  
  1170  	// first stop the non-reactor services
  1171  	if err := n.eventBus.Stop(); err != nil {
  1172  		n.Logger.Error("Error closing eventBus", "err", err)
  1173  	}
  1174  	if err := n.indexerService.Stop(); err != nil {
  1175  		n.Logger.Error("Error closing indexerService", "err", err)
  1176  	}
  1177  
  1178  	// now stop the reactors
  1179  	if err := n.sw.Stop(); err != nil {
  1180  		n.Logger.Error("Error closing switch", "err", err)
  1181  	}
  1182  
  1183  	if err := n.transport.Close(); err != nil {
  1184  		n.Logger.Error("Error closing transport", "err", err)
  1185  	}
  1186  
  1187  	n.isListening = false
  1188  
  1189  	// finally stop the listeners / external services
  1190  	for _, l := range n.rpcListeners {
  1191  		n.Logger.Info("Closing rpc listener", "listener", l)
  1192  		if err := l.Close(); err != nil {
  1193  			n.Logger.Error("Error closing listener", "listener", l, "err", err)
  1194  		}
  1195  	}
  1196  
  1197  	if pvsc, ok := n.privValidator.(service.Service); ok {
  1198  		if err := pvsc.Stop(); err != nil {
  1199  			n.Logger.Error("Error closing private validator", "err", err)
  1200  		}
  1201  	}
  1202  
  1203  	if n.prometheusSrv != nil {
  1204  		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
  1205  			// Error from closing listeners, or context timeout:
  1206  			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
  1207  		}
  1208  	}
  1209  	if n.blockStore != nil {
  1210  		n.Logger.Info("Closing blockstore")
  1211  		if err := n.blockStore.Close(); err != nil {
  1212  			n.Logger.Error("problem closing blockstore", "err", err)
  1213  		}
  1214  	}
  1215  	if n.stateStore != nil {
  1216  		n.Logger.Info("Closing statestore")
  1217  		if err := n.stateStore.Close(); err != nil {
  1218  			n.Logger.Error("problem closing statestore", "err", err)
  1219  		}
  1220  	}
  1221  	if n.evidencePool != nil {
  1222  		n.Logger.Info("Closing evidencestore")
  1223  		if err := n.evidencePool.Close(); err != nil {
  1224  			n.Logger.Error("problem closing evidencestore", "err", err)
  1225  		}
  1226  	}
  1227  }
  1228  
  1229  // ConfigureRPC makes sure RPC has all the objects it needs to operate.
  1230  func (n *Node) ConfigureRPC() error {
  1231  	pubKey, err := n.privValidator.GetPubKey()
  1232  	if err != nil {
  1233  		return fmt.Errorf("can't get pubkey: %w", err)
  1234  	}
  1235  	rpccore.SetEnvironment(&rpccore.Environment{
  1236  		ProxyAppQuery:   n.proxyApp.Query(),
  1237  		ProxyAppMempool: n.proxyApp.Mempool(),
  1238  
  1239  		StateStore:     n.stateStore,
  1240  		BlockStore:     n.blockStore,
  1241  		EvidencePool:   n.evidencePool,
  1242  		ConsensusState: n.consensusState,
  1243  		P2PPeers:       n.sw,
  1244  		P2PTransport:   n,
  1245  
  1246  		PubKey:           pubKey,
  1247  		GenDoc:           n.genesisDoc,
  1248  		TxIndexer:        n.txIndexer,
  1249  		BlockIndexer:     n.blockIndexer,
  1250  		ConsensusReactor: n.consensusReactor,
  1251  		EventBus:         n.eventBus,
  1252  		Mempool:          n.mempool,
  1253  
  1254  		Logger: n.Logger.With("module", "rpc"),
  1255  
  1256  		Config: *n.config.RPC,
  1257  	})
  1258  	if err := rpccore.InitGenesisChunks(); err != nil {
  1259  		return err
  1260  	}
  1261  
  1262  	return nil
  1263  }
  1264  
  1265  func (n *Node) startRPC() ([]net.Listener, error) {
  1266  	err := n.ConfigureRPC()
  1267  	if err != nil {
  1268  		return nil, err
  1269  	}
  1270  
  1271  	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
  1272  
  1273  	if n.config.RPC.Unsafe {
  1274  		rpccore.AddUnsafeRoutes()
  1275  	}
  1276  
  1277  	config := rpcserver.DefaultConfig()
  1278  	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
  1279  	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
  1280  	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
  1281  	// If necessary adjust global WriteTimeout to ensure it's greater than
  1282  	// TimeoutBroadcastTxCommit.
  1283  	// See https://github.com/tendermint/tendermint/issues/3435
  1284  	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
  1285  		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
  1286  	}
  1287  
  1288  	// we may expose the RPC over both a unix and a tcp socket
  1289  	listeners := make([]net.Listener, len(listenAddrs))
  1290  	for i, listenAddr := range listenAddrs {
  1291  		mux := http.NewServeMux()
  1292  		rpcLogger := n.Logger.With("module", "rpc-server")
  1293  		wmLogger := rpcLogger.With("protocol", "websocket")
  1294  		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
  1295  			rpcserver.OnDisconnect(func(remoteAddr string) {
  1296  				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
  1297  				if err != nil && err != cmtpubsub.ErrSubscriptionNotFound {
  1298  					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
  1299  				}
  1300  			}),
  1301  			rpcserver.ReadLimit(config.MaxBodyBytes),
  1302  			rpcserver.WriteChanCapacity(n.config.RPC.WebSocketWriteBufferSize),
  1303  		)
  1304  		wm.SetLogger(wmLogger)
  1305  		mux.HandleFunc("/websocket", wm.WebsocketHandler)
  1306  		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
  1307  		listener, err := rpcserver.Listen(
  1308  			listenAddr,
  1309  			config,
  1310  		)
  1311  		if err != nil {
  1312  			return nil, err
  1313  		}
  1314  
  1315  		var rootHandler http.Handler = mux
  1316  		if n.config.RPC.IsCorsEnabled() {
  1317  			corsMiddleware := cors.New(cors.Options{
  1318  				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
  1319  				AllowedMethods: n.config.RPC.CORSAllowedMethods,
  1320  				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
  1321  			})
  1322  			rootHandler = corsMiddleware.Handler(mux)
  1323  		}
  1324  		if n.config.RPC.IsTLSEnabled() {
  1325  			go func() {
  1326  				if err := rpcserver.ServeTLS(
  1327  					listener,
  1328  					rootHandler,
  1329  					n.config.RPC.CertFile(),
  1330  					n.config.RPC.KeyFile(),
  1331  					rpcLogger,
  1332  					config,
  1333  				); err != nil {
  1334  					n.Logger.Error("Error serving server with TLS", "err", err)
  1335  				}
  1336  			}()
  1337  		} else {
  1338  			go func() {
  1339  				if err := rpcserver.Serve(
  1340  					listener,
  1341  					rootHandler,
  1342  					rpcLogger,
  1343  					config,
  1344  				); err != nil {
  1345  					n.Logger.Error("Error serving server", "err", err)
  1346  				}
  1347  			}()
  1348  		}
  1349  
  1350  		listeners[i] = listener
  1351  	}
  1352  
  1353  	// we expose a simplified API over gRPC for convenience to app devs
  1354  	grpcListenAddr := n.config.RPC.GRPCListenAddress
  1355  	if grpcListenAddr != "" {
  1356  		config := rpcserver.DefaultConfig()
  1357  		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
  1358  		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
  1359  		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
  1360  		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
  1361  		// If necessary adjust global WriteTimeout to ensure it's greater than
  1362  		// TimeoutBroadcastTxCommit.
  1363  		// See https://github.com/tendermint/tendermint/issues/3435
  1364  		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
  1365  			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
  1366  		}
  1367  		listener, err := rpcserver.Listen(grpcListenAddr, config)
  1368  		if err != nil {
  1369  			return nil, err
  1370  		}
  1371  		go func() {
  1372  			if err := grpccore.StartGRPCServer(listener); err != nil {
  1373  				n.Logger.Error("Error starting gRPC server", "err", err)
  1374  			}
  1375  		}()
  1376  		listeners = append(listeners, listener)
  1377  
  1378  	}
  1379  
  1380  	return listeners, nil
  1381  }
  1382  
  1383  // startPrometheusServer starts a Prometheus HTTP server, serving metrics
  1384  // collected in the default Prometheus registry on addr.
  1385  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1386  	srv := &http.Server{
  1387  		Addr: addr,
  1388  		Handler: promhttp.InstrumentMetricHandler(
  1389  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1390  				prometheus.DefaultGatherer,
  1391  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1392  			),
  1393  		),
  1394  		ReadHeaderTimeout: readHeaderTimeout,
  1395  	}
  1396  	go func() {
  1397  		if err := srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
  1398  			// Error starting or closing listener:
  1399  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1400  		}
  1401  	}()
  1402  	return srv
  1403  }
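
        // Illustrative config sketch: enabling the Prometheus endpoint served
        // above via config.toml (keys as in upstream CometBFT):
        //
        //	[instrumentation]
        //	prometheus = true
        //	prometheus_listen_addr = ":26660"
        //	max_open_connections = 3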
  1404  
  1405  // Switch returns the Node's Switch.
  1406  func (n *Node) Switch() *p2p.Switch {
  1407  	return n.sw
  1408  }
  1409  
  1410  // BlockStore returns the Node's BlockStore.
  1411  func (n *Node) BlockStore() *store.BlockStore {
  1412  	return n.blockStore
  1413  }
  1414  
  1415  // ConsensusState returns the Node's ConsensusState.
  1416  func (n *Node) ConsensusState() *cs.State {
  1417  	return n.consensusState
  1418  }
  1419  
  1420  // ConsensusReactor returns the Node's ConsensusReactor.
  1421  func (n *Node) ConsensusReactor() *cs.Reactor {
  1422  	return n.consensusReactor
  1423  }
  1424  
  1425  // MempoolReactor returns the Node's mempool reactor.
  1426  func (n *Node) MempoolReactor() p2p.Reactor {
  1427  	return n.mempoolReactor
  1428  }
  1429  
  1430  // Mempool returns the Node's mempool.
  1431  func (n *Node) Mempool() mempl.Mempool {
  1432  	return n.mempool
  1433  }
  1434  
  1435  // PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
  1436  func (n *Node) PEXReactor() *pex.Reactor {
  1437  	return n.pexReactor
  1438  }
  1439  
  1440  // EvidencePool returns the Node's EvidencePool.
  1441  func (n *Node) EvidencePool() *evidence.Pool {
  1442  	return n.evidencePool
  1443  }
  1444  
  1445  // EventBus returns the Node's EventBus.
  1446  func (n *Node) EventBus() *types.EventBus {
  1447  	return n.eventBus
  1448  }
  1449  
  1450  // PrivValidator returns the Node's PrivValidator.
  1451  // XXX: for convenience only!
  1452  func (n *Node) PrivValidator() types.PrivValidator {
  1453  	return n.privValidator
  1454  }
  1455  
  1456  // GenesisDoc returns the Node's GenesisDoc.
  1457  func (n *Node) GenesisDoc() *types.GenesisDoc {
  1458  	return n.genesisDoc
  1459  }
  1460  
  1461  // ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
  1462  func (n *Node) ProxyApp() proxy.AppConns {
  1463  	return n.proxyApp
  1464  }
  1465  
  1466  // Config returns the Node's config.
  1467  func (n *Node) Config() *cfg.Config {
  1468  	return n.config
  1469  }
  1470  
  1471  //------------------------------------------------------------------------------
  1472  
  1473  func (n *Node) Listeners() []string {
  1474  	return []string{
  1475  		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
  1476  	}
  1477  }
  1478  
  1479  func (n *Node) IsListening() bool {
  1480  	return n.isListening
  1481  }
  1482  
  1483  // NodeInfo returns the Node's Info from the Switch.
  1484  func (n *Node) NodeInfo() p2p.NodeInfo {
  1485  	return n.nodeInfo
  1486  }
  1487  
  1488  func makeNodeInfo(
  1489  	config *cfg.Config,
  1490  	nodeKey *p2p.NodeKey,
  1491  	txIndexer txindex.TxIndexer,
  1492  	genDoc *types.GenesisDoc,
  1493  	state sm.State,
  1494  ) (p2p.DefaultNodeInfo, error) {
  1495  	txIndexerStatus := "on"
  1496  	if _, ok := txIndexer.(*null.TxIndex); ok {
  1497  		txIndexerStatus = "off"
  1498  	}
  1499  
  1500  	nodeInfo := p2p.DefaultNodeInfo{
  1501  		ProtocolVersion: p2p.NewProtocolVersion(
  1502  			version.P2PProtocol, // global
  1503  			state.Version.Consensus.Block,
  1504  			state.Version.Consensus.App,
  1505  		),
  1506  		DefaultNodeID: nodeKey.ID(),
  1507  		Network:       genDoc.ChainID,
  1508  		Version:       version.TMCoreSemVer,
  1509  		Channels: []byte{
  1510  			bc.BlocksyncChannel,
  1511  			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
  1512  			mempl.MempoolChannel,
  1513  			evidence.EvidenceChannel,
  1514  			statesync.SnapshotChannel, statesync.ChunkChannel,
  1515  		},
  1516  		Moniker: config.Moniker,
  1517  		Other: p2p.DefaultNodeInfoOther{
  1518  			TxIndex:    txIndexerStatus,
  1519  			RPCAddress: config.RPC.ListenAddress,
  1520  		},
  1521  	}
  1522  
  1523  	if config.P2P.PexReactor {
  1524  		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
  1525  	}
  1526  
  1527  	lAddr := config.P2P.ExternalAddress
  1528  
  1529  	if lAddr == "" {
  1530  		lAddr = config.P2P.ListenAddress
  1531  	}
  1532  
  1533  	nodeInfo.ListenAddr = lAddr
  1534  
  1535  	err := nodeInfo.Validate()
  1536  	return nodeInfo, err
  1537  }
  1538  
  1539  //------------------------------------------------------------------------------
  1540  
  1541  var genesisDocKey = []byte("genesisDoc")
  1542  
  1543  // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
  1544  // database, or creates one using the given genesisDocProvider. On success this also
  1545  // returns the genesis doc loaded through the given provider.
  1546  func LoadStateFromDBOrGenesisDocProvider(
  1547  	stateDB dbm.DB,
  1548  	genesisDocProvider GenesisDocProvider,
  1549  ) (sm.State, *types.GenesisDoc, error) {
  1550  	// Get genesis doc
  1551  	genDoc, err := loadGenesisDoc(stateDB)
  1552  	if err != nil {
  1553  		genDoc, err = genesisDocProvider()
  1554  		if err != nil {
  1555  			return sm.State{}, nil, err
  1556  		}
  1557  		// Save the genesis doc to guard against a class of user errors (e.g. the
  1558  		// file being changed, accidentally or otherwise). Also useful as an audit trail.
  1559  		if err := saveGenesisDoc(stateDB, genDoc); err != nil {
  1560  			return sm.State{}, nil, err
  1561  		}
  1562  	}
  1563  	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
  1564  		DiscardABCIResponses: false,
  1565  	})
  1566  	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
  1567  	if err != nil {
  1568  		return sm.State{}, nil, err
  1569  	}
  1570  	return state, genDoc, nil
  1571  }
  1572  
  1573  // loadGenesisDoc reads the genesis document from the database; it panics if the stored bytes cannot be unmarshaled
  1574  func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
  1575  	b, err := db.Get(genesisDocKey)
  1576  	if err != nil {
  1577  		panic(err)
  1578  	}
  1579  	if len(b) == 0 {
  1580  		return nil, errors.New("genesis doc not found")
  1581  	}
  1582  	var genDoc *types.GenesisDoc
  1583  	err = cmtjson.Unmarshal(b, &genDoc)
  1584  	if err != nil {
  1585  		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
  1586  	}
  1587  	return genDoc, nil
  1588  }
  1589  
  1590  // saveGenesisDoc persists the given genesis document; it returns an error if marshaling or the write fails
  1591  func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error {
  1592  	b, err := cmtjson.Marshal(genDoc)
  1593  	if err != nil {
  1594  		return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err)
  1595  	}
  1596  	if err := db.SetSync(genesisDocKey, b); err != nil {
  1597  		return err
  1598  	}
  1599  
  1600  	return nil
  1601  }
  1602  
  1603  func createAndStartPrivValidatorSocketClient(
  1604  	listenAddr,
  1605  	chainID string,
  1606  	logger log.Logger,
  1607  ) (types.PrivValidator, error) {
  1608  	pve, err := privval.NewSignerListener(listenAddr, logger)
  1609  	if err != nil {
  1610  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1611  	}
  1612  
  1613  	pvsc, err := privval.NewSignerClient(pve, chainID)
  1614  	if err != nil {
  1615  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1616  	}
  1617  
  1618  	// try to get a pubkey from the private validator upon first connection
  1619  	_, err = pvsc.GetPubKey()
  1620  	if err != nil {
  1621  		return nil, fmt.Errorf("can't get pubkey: %w", err)
  1622  	}
  1623  
  1624  	const (
  1625  		retries = 50 // 50 * 100ms = 5s total
  1626  		timeout = 100 * time.Millisecond
  1627  	)
  1628  	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
  1629  
  1630  	return pvscWithRetries, nil
  1631  }
  1632  
  1633  // splitAndTrimEmpty slices s into all substrings separated by sep, trims all
  1634  // leading and trailing Unicode code points contained in cutset from each
  1635  // substring, and drops any substring that is empty after trimming. Splitting
  1636  // behaves like strings.Split: if sep is empty, splitAndTrimEmpty splits after
  1637  // each UTF-8 sequence.
  1638  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1639  	if s == "" {
  1640  		return []string{}
  1641  	}
  1642  
  1643  	spl := strings.Split(s, sep)
  1644  	nonEmptyStrings := make([]string, 0, len(spl))
  1645  	for i := 0; i < len(spl); i++ {
  1646  		element := strings.Trim(spl[i], cutset)
  1647  		if element != "" {
  1648  			nonEmptyStrings = append(nonEmptyStrings, element)
  1649  		}
  1650  	}
  1651  	return nonEmptyStrings
  1652  }