github.com/Finschia/ostracon@v1.1.5/node/node.go (about)

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"net"
     9  	"net/http"
    10  	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
    11  	"strings"
    12  	"time"
    13  
    14  	"github.com/prometheus/client_golang/prometheus"
    15  	"github.com/prometheus/client_golang/prometheus/promhttp"
    16  	"github.com/rs/cors"
    17  	abci "github.com/tendermint/tendermint/abci/types"
    18  	dbm "github.com/tendermint/tm-db"
    19  
    20  	bcv0 "github.com/Finschia/ostracon/blockchain/v0"
    21  	bcv1 "github.com/Finschia/ostracon/blockchain/v1"
    22  	bcv2 "github.com/Finschia/ostracon/blockchain/v2"
    23  	cfg "github.com/Finschia/ostracon/config"
    24  	cs "github.com/Finschia/ostracon/consensus"
    25  	"github.com/Finschia/ostracon/crypto"
    26  	"github.com/Finschia/ostracon/evidence"
    27  	tmjson "github.com/Finschia/ostracon/libs/json"
    28  	"github.com/Finschia/ostracon/libs/log"
    29  	tmpubsub "github.com/Finschia/ostracon/libs/pubsub"
    30  	"github.com/Finschia/ostracon/libs/service"
    31  	"github.com/Finschia/ostracon/light"
    32  	mempl "github.com/Finschia/ostracon/mempool"
    33  	mempoolv0 "github.com/Finschia/ostracon/mempool/v0"
    34  
    35  	//mempoolv1 "github.com/Finschia/ostracon/mempool/v1"
    36  	"github.com/Finschia/ostracon/p2p"
    37  	"github.com/Finschia/ostracon/p2p/pex"
    38  	"github.com/Finschia/ostracon/privval"
    39  	"github.com/Finschia/ostracon/proxy"
    40  	rpccore "github.com/Finschia/ostracon/rpc/core"
    41  	grpccore "github.com/Finschia/ostracon/rpc/grpc"
    42  	rpcserver "github.com/Finschia/ostracon/rpc/jsonrpc/server"
    43  	sm "github.com/Finschia/ostracon/state"
    44  	"github.com/Finschia/ostracon/state/indexer"
    45  	blockidxkv "github.com/Finschia/ostracon/state/indexer/block/kv"
    46  	blockidxnull "github.com/Finschia/ostracon/state/indexer/block/null"
    47  	"github.com/Finschia/ostracon/state/indexer/sink/psql"
    48  	"github.com/Finschia/ostracon/state/txindex"
    49  	"github.com/Finschia/ostracon/state/txindex/kv"
    50  	"github.com/Finschia/ostracon/state/txindex/null"
    51  	"github.com/Finschia/ostracon/statesync"
    52  	"github.com/Finschia/ostracon/store"
    53  	"github.com/Finschia/ostracon/types"
    54  	tmtime "github.com/Finschia/ostracon/types/time"
    55  	"github.com/Finschia/ostracon/version"
    56  
    57  	_ "github.com/lib/pq" // provide the psql db driver
    58  )
    59  
    60  //------------------------------------------------------------------------------
    61  
// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string      // database name, e.g. "blockstore", "state", "tx_index", "evidence"
	Config *cfg.Config // node configuration supplying the backend type and data directory
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

// readHeaderTimeout bounds how long an HTTP server waits for request headers.
// NOTE(review): its usage is not visible in this chunk — presumably applied to
// the RPC/Prometheus http.Server instances; confirm at the use site.
const readHeaderTimeout = 10 * time.Second
    72  
    73  // DefaultDBProvider returns a database using the DBBackend and DBDir
    74  // specified in the ctx.Config.
    75  func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
    76  	dbType := dbm.BackendType(ctx.Config.DBBackend)
    77  	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
    78  }
    79  
// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
// The default implementation reads from config.GenesisFile(); see
// DefaultGenesisDocProviderFunc.
type GenesisDocProvider func() (*types.GenesisDoc, error)
    84  
    85  // DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
    86  // the GenesisDoc from the config.GenesisFile() on the filesystem.
    87  func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
    88  	return func() (*types.GenesisDoc, error) {
    89  		return types.GenesisDocFromFile(config.GenesisFile())
    90  	}
    91  }
    92  
// Provider takes a config and a logger and returns a ready to go Node.
// DefaultNewNode and NewOstraconNode both satisfy this signature.
type Provider func(*cfg.Config, log.Logger) (*Node, error)
    95  
    96  // DefaultNewNode returns an Ostracon node with default settings for the
    97  // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
    98  // It implements NodeProvider.
    99  func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
   100  	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
   101  	if err != nil {
   102  		return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
   103  	}
   104  
   105  	pv := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
   106  	return NewNode(config,
   107  		pv,
   108  		nodeKey,
   109  		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
   110  		DefaultGenesisDocProviderFunc(config),
   111  		DefaultDBProvider,
   112  		DefaultMetricsProvider(config.Instrumentation),
   113  		logger,
   114  	)
   115  }
   116  
   117  // NewOstraconNode returns an Ostracon node for more safe production environments that don't automatically generate
   118  // critical files. This function doesn't reference local key pair in configurations using KMS.
   119  func NewOstraconNode(config *cfg.Config, logger log.Logger) (*Node, error) {
   120  	nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
   121  	if err != nil {
   122  		return nil, fmt.Errorf("failed to load node key %s: %w", config.NodeKeyFile(), err)
   123  	}
   124  
   125  	var privKey types.PrivValidator
   126  	if strings.TrimSpace(config.PrivValidatorListenAddr) == "" {
   127  		privKey = privval.LoadFilePV(
   128  			config.PrivValidatorKeyFile(),
   129  			config.PrivValidatorStateFile())
   130  	}
   131  	return NewNode(
   132  		config,
   133  		privKey,
   134  		nodeKey,
   135  		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
   136  		DefaultGenesisDocProviderFunc(config),
   137  		DefaultDBProvider,
   138  		DefaultMetricsProvider(config.Instrumentation),
   139  		logger,
   140  	)
   141  }
   142  
// MetricsProvider returns a consensus, p2p and mempool Metrics.
// The chainID is attached as a label so dashboards can distinguish chains.
type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
   145  
   146  // DefaultMetricsProvider returns Metrics build using Prometheus client library
   147  // if Prometheus is enabled. Otherwise, it returns no-op Metrics.
   148  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   149  	return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
   150  		if config.Prometheus {
   151  			return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   152  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   153  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   154  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   155  		}
   156  		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
   157  	}
   158  }
   159  
// Option sets a parameter for the node.
// Options are applied by NewNode after the node is fully constructed.
type Option func(*Node)

// Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors.
// See: https://github.com/tendermint/tendermint/issues/4595
// Implemented by the blockchain reactors so state sync can hand off to fast sync.
type fastSyncReactor interface {
	SwitchToFastSync(sm.State) error
}
   168  
   169  // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
   170  // the node's Switch.
   171  //
   172  // WARNING: using any name from the below list of the existing reactors will
   173  // result in replacing it with the custom one.
   174  //
   175  //   - MEMPOOL
   176  //   - BLOCKCHAIN
   177  //   - CONSENSUS
   178  //   - EVIDENCE
   179  //   - PEX
   180  //   - STATESYNC
   181  func CustomReactors(reactors map[string]p2p.Reactor) Option {
   182  	return func(n *Node) {
   183  		for name, reactor := range reactors {
   184  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   185  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   186  					"name", name, "existing", existingReactor, "custom", reactor)
   187  				n.sw.RemoveReactor(name, existingReactor)
   188  			}
   189  			n.sw.AddReactor(name, reactor)
   190  			// register the new channels to the nodeInfo
   191  			// NOTE: This is a bit messy now with the type casting but is
   192  			// cleaned up in the following version when NodeInfo is changed from
   193  			// and interface to a concrete type
   194  			if ni, ok := n.nodeInfo.(p2p.DefaultNodeInfo); ok {
   195  				for _, chDesc := range reactor.GetChannels() {
   196  					if !ni.HasChannel(chDesc.ID) {
   197  						ni.Channels = append(ni.Channels, chDesc.ID)
   198  						err := n.transport.AddChannel(chDesc.ID)
   199  						if err != nil {
   200  							n.Logger.Debug("AddChannel failed", "err", err)
   201  						}
   202  					}
   203  				}
   204  				n.nodeInfo = ni
   205  			} else {
   206  				n.Logger.Error("Node info is not of type DefaultNodeInfo. Custom reactor channels can not be added.")
   207  			}
   208  		}
   209  	}
   210  }
   211  
   212  //------------------------------------------------------------------------------
   213  
// Node is the highest level interface to a full Ostracon node.
// It includes all configuration information and running services.
type Node struct {
	service.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo // our node's identity and channels, shared with peers
	nodeKey     *p2p.NodeKey // our node privkey
	isListening bool

	// services
	eventBus          *types.EventBus // pub/sub for services
	stateStore        sm.Store        // durable consensus/application state
	blockStore        *store.BlockStore // store the blockchain to disk
	bcReactor         p2p.Reactor       // for fast-syncing
	mempoolReactor    p2p.Reactor       // for gossipping transactions
	mempool           mempl.Mempool     // pending-transaction pool backing the reactor
	stateSync         bool                    // whether the node should state sync on startup
	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
	stateSyncGenesis  sm.State                // provides the genesis state for state sync
	consensusState    *cs.State               // latest consensus state
	consensusReactor  *cs.Reactor             // for participating in the consensus
	pexReactor        *pex.Reactor            // for exchanging peer addresses
	evidencePool      *evidence.Pool          // tracking evidence
	proxyApp          proxy.AppConns          // connection to the application
	rpcListeners      []net.Listener          // rpc servers
	txIndexer         txindex.TxIndexer       // indexes txs (kv, psql, or null)
	blockIndexer      indexer.BlockIndexer    // indexes block events (kv, psql, or null)
	indexerService    *txindex.IndexerService // subscribes indexers to the event bus
	prometheusSrv     *http.Server            // metrics endpoint, when enabled
}
   254  
   255  func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
   256  	var blockStoreDB dbm.DB
   257  	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
   258  	if err != nil {
   259  		return
   260  	}
   261  	blockStore = store.NewBlockStore(blockStoreDB)
   262  
   263  	stateDB, err = dbProvider(&DBContext{"state", config})
   264  	if err != nil {
   265  		return
   266  	}
   267  
   268  	return
   269  }
   270  
   271  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
   272  	proxyApp := proxy.NewAppConns(clientCreator)
   273  	proxyApp.SetLogger(logger.With("module", "proxy"))
   274  	if err := proxyApp.Start(); err != nil {
   275  		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
   276  	}
   277  	return proxyApp, nil
   278  }
   279  
   280  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   281  	eventBus := types.NewEventBus()
   282  	eventBus.SetLogger(logger.With("module", "events"))
   283  	if err := eventBus.Start(); err != nil {
   284  		return nil, err
   285  	}
   286  	return eventBus, nil
   287  }
   288  
   289  func createAndStartIndexerService(
   290  	config *cfg.Config,
   291  	chainID string,
   292  	dbProvider DBProvider,
   293  	eventBus *types.EventBus,
   294  	logger log.Logger,
   295  ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
   296  	var (
   297  		txIndexer    txindex.TxIndexer
   298  		blockIndexer indexer.BlockIndexer
   299  	)
   300  
   301  	switch config.TxIndex.Indexer {
   302  	case "kv":
   303  		store, err := dbProvider(&DBContext{"tx_index", config})
   304  		if err != nil {
   305  			return nil, nil, nil, err
   306  		}
   307  
   308  		txIndexer = kv.NewTxIndex(store)
   309  		blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
   310  
   311  	case "psql":
   312  		if config.TxIndex.PsqlConn == "" {
   313  			return nil, nil, nil, errors.New(`no psql-conn is set for the "psql" indexer`)
   314  		}
   315  		es, err := psql.NewEventSink(config.TxIndex.PsqlConn, chainID)
   316  		if err != nil {
   317  			return nil, nil, nil, fmt.Errorf("creating psql indexer: %w", err)
   318  		}
   319  		txIndexer = es.TxIndexer()
   320  		blockIndexer = es.BlockIndexer()
   321  
   322  	default:
   323  		txIndexer = &null.TxIndex{}
   324  		blockIndexer = &blockidxnull.BlockerIndexer{}
   325  	}
   326  
   327  	indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false)
   328  	indexerService.SetLogger(logger.With("module", "txindex"))
   329  
   330  	if err := indexerService.Start(); err != nil {
   331  		return nil, nil, nil, err
   332  	}
   333  
   334  	return indexerService, txIndexer, blockIndexer, nil
   335  }
   336  
   337  func doHandshake(
   338  	stateStore sm.Store,
   339  	state sm.State,
   340  	blockStore sm.BlockStore,
   341  	genDoc *types.GenesisDoc,
   342  	eventBus types.BlockEventPublisher,
   343  	proxyApp proxy.AppConns,
   344  	consensusLogger log.Logger,
   345  ) error {
   346  	handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
   347  	handshaker.SetLogger(consensusLogger)
   348  	handshaker.SetEventBus(eventBus)
   349  	if err := handshaker.Handshake(proxyApp); err != nil {
   350  		return fmt.Errorf("error during handshake: %v", err)
   351  	}
   352  	return nil
   353  }
   354  
   355  func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
   356  	// Log the version info.
   357  	logger.Info("Version info",
   358  		"software", version.OCCoreSemVer,
   359  		"abci", version.ABCIVersion,
   360  		"app", state.Version.Consensus.App,
   361  		"block", state.Version.Consensus.Block,
   362  		"p2p", version.P2PProtocol,
   363  	)
   364  
   365  	// If the state and software differ in block version, at least log it.
   366  	if state.Version.Consensus.Block != version.BlockProtocol {
   367  		logger.Info("Software and state have different block protocols",
   368  			"software", version.BlockProtocol,
   369  			"state", state.Version.Consensus.Block,
   370  		)
   371  	}
   372  
   373  	addr := pubKey.Address()
   374  	// Log whether this node is a validator or an observer
   375  	if state.Validators.HasAddress(addr) {
   376  		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   377  	} else {
   378  		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   379  	}
   380  }
   381  
   382  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   383  	if state.Validators.Size() > 1 {
   384  		return false
   385  	}
   386  	addr, _ := state.Validators.GetByIndex(0)
   387  	return bytes.Equal(pubKey.Address(), addr)
   388  }
   389  
// createMempoolAndMempoolReactor builds the mempool implementation selected by
// config.Mempool.Version together with its p2p reactor.
//
// MempoolV1 is deprecated in this fork and panics if configured. An unknown
// version returns (nil, nil); the caller is expected to treat that as a
// configuration error.
func createMempoolAndMempoolReactor(
	config *cfg.Config,
	proxyApp proxy.AppConns,
	state sm.State,
	memplMetrics *mempl.Metrics,
	logger log.Logger,
) (mempl.Mempool, p2p.Reactor) {
	switch config.Mempool.Version {
	case cfg.MempoolV1: // XXX Deprecated MempoolV1
		// Fail loudly rather than silently running a removed implementation.
		panic("Deprecated MempoolV1")
		/*
			// TODO(thane): Remove log once https://github.com/tendermint/tendermint/issues/8775 is resolved.
			logger.Error("While the prioritized mempool API is stable, there is a critical bug in it that is currently under investigation. See https://github.com/tendermint/tendermint/issues/8775 for details")
			mp := mempoolv1.NewTxMempool(
				logger,
				config.Mempool,
				proxyApp.Mempool(),
				state.LastBlockHeight,
				mempoolv1.WithMetrics(memplMetrics),
				mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
				mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
			)

			reactor := mempoolv1.NewReactor(
				config.Mempool,
				mp,
			)
			if config.Consensus.WaitForTxs() {
				mp.EnableTxsAvailable()
			}

			return mp, reactor
		*/
	case cfg.MempoolV0:
		// CList mempool seeded at the current height, with pre/post-check
		// functions derived from the latest state.
		mp := mempoolv0.NewCListMempool(
			config.Mempool,
			proxyApp.Mempool(),
			state.LastBlockHeight,
			mempoolv0.WithMetrics(memplMetrics),
			mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
			mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
		)

		mp.SetLogger(logger)

		reactor := mempoolv0.NewReactor(
			config.Mempool,
			config.P2P.RecvAsync,
			config.P2P.MempoolRecvBufSize,
			mp,
		)

		// When consensus waits for txs, let the mempool signal availability.
		if config.Consensus.WaitForTxs() {
			mp.EnableTxsAvailable()
		}

		return mp, reactor

	default:
		// Unknown mempool version.
		return nil, nil
	}
}
   452  
   453  func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   454  	stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger,
   455  ) (*evidence.Reactor, *evidence.Pool, error) {
   456  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   457  	if err != nil {
   458  		return nil, nil, err
   459  	}
   460  	evidenceLogger := logger.With("module", "evidence")
   461  	evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB, sm.StoreOptions{
   462  		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
   463  	}), blockStore)
   464  	if err != nil {
   465  		return nil, nil, err
   466  	}
   467  	evidenceReactor := evidence.NewReactor(evidencePool, config.P2P.RecvAsync, config.P2P.EvidenceRecvBufSize)
   468  	evidenceReactor.SetLogger(evidenceLogger)
   469  	return evidenceReactor, evidencePool, nil
   470  }
   471  
   472  func createBlockchainReactor(config *cfg.Config,
   473  	state sm.State,
   474  	blockExec *sm.BlockExecutor,
   475  	blockStore *store.BlockStore,
   476  	fastSync bool,
   477  	logger log.Logger,
   478  ) (bcReactor p2p.Reactor, err error) {
   479  	switch config.FastSync.Version {
   480  	case "v0":
   481  		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync,
   482  			config.P2P.RecvAsync, config.P2P.BlockchainRecvBufSize)
   483  	case "v1":
   484  		bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync,
   485  			config.P2P.RecvAsync, config.P2P.BlockchainRecvBufSize)
   486  	case "v2":
   487  		bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   488  	default:
   489  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
   490  	}
   491  
   492  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   493  	return bcReactor, nil
   494  }
   495  
   496  func createConsensusReactor(config *cfg.Config,
   497  	state sm.State,
   498  	blockExec *sm.BlockExecutor,
   499  	blockStore sm.BlockStore,
   500  	mempool mempl.Mempool,
   501  	evidencePool *evidence.Pool,
   502  	privValidator types.PrivValidator,
   503  	csMetrics *cs.Metrics,
   504  	waitSync bool,
   505  	eventBus *types.EventBus,
   506  	consensusLogger log.Logger,
   507  ) (*cs.Reactor, *cs.State) {
   508  	consensusState := cs.NewState(
   509  		config.Consensus,
   510  		state.Copy(),
   511  		blockExec,
   512  		blockStore,
   513  		mempool,
   514  		evidencePool,
   515  		cs.StateMetrics(csMetrics),
   516  	)
   517  	consensusState.SetLogger(consensusLogger)
   518  	if privValidator != nil {
   519  		consensusState.SetPrivValidator(privValidator)
   520  	}
   521  	consensusReactor := cs.NewReactor(consensusState, waitSync, config.P2P.RecvAsync, config.P2P.ConsensusRecvBufSize,
   522  		cs.ReactorMetrics(csMetrics))
   523  	consensusReactor.SetLogger(consensusLogger)
   524  	// services which will be publishing and/or subscribing for messages (events)
   525  	// consensusReactor will set it on consensusState and blockExecutor
   526  	consensusReactor.SetEventBus(eventBus)
   527  	return consensusReactor, consensusState
   528  }
   529  
   530  func createTransport(
   531  	config *cfg.Config,
   532  	nodeInfo p2p.NodeInfo,
   533  	nodeKey *p2p.NodeKey,
   534  	proxyApp proxy.AppConns,
   535  ) (
   536  	*p2p.MultiplexTransport,
   537  	[]p2p.PeerFilterFunc,
   538  ) {
   539  	var (
   540  		mConnConfig = p2p.MConnConfig(config.P2P)
   541  		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
   542  		connFilters = []p2p.ConnFilterFunc{}
   543  		peerFilters = []p2p.PeerFilterFunc{}
   544  	)
   545  
   546  	if !config.P2P.AllowDuplicateIP {
   547  		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
   548  	}
   549  
   550  	// Filter peers by addr or pubkey with an ABCI query.
   551  	// If the query return code is OK, add peer.
   552  	if config.FilterPeers {
   553  		connFilters = append(
   554  			connFilters,
   555  			// ABCI query for address filtering.
   556  			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
   557  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   558  					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
   559  				})
   560  				if err != nil {
   561  					return err
   562  				}
   563  				if res.IsErr() {
   564  					return fmt.Errorf("error querying abci app: %v", res)
   565  				}
   566  
   567  				return nil
   568  			},
   569  		)
   570  
   571  		peerFilters = append(
   572  			peerFilters,
   573  			// ABCI query for ID filtering.
   574  			func(_ p2p.IPeerSet, p p2p.Peer) error {
   575  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   576  					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
   577  				})
   578  				if err != nil {
   579  					return err
   580  				}
   581  				if res.IsErr() {
   582  					return fmt.Errorf("error querying abci app: %v", res)
   583  				}
   584  
   585  				return nil
   586  			},
   587  		)
   588  	}
   589  
   590  	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
   591  
   592  	// Limit the number of incoming connections.
   593  	max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   594  	p2p.MultiplexTransportMaxIncomingConnections(max)(transport)
   595  
   596  	return transport, peerFilters
   597  }
   598  
   599  func createSwitch(config *cfg.Config,
   600  	transport p2p.Transport,
   601  	p2pMetrics *p2p.Metrics,
   602  	peerFilters []p2p.PeerFilterFunc,
   603  	mempoolReactor p2p.Reactor,
   604  	bcReactor p2p.Reactor,
   605  	stateSyncReactor *statesync.Reactor,
   606  	consensusReactor *cs.Reactor,
   607  	evidenceReactor *evidence.Reactor,
   608  	nodeInfo p2p.NodeInfo,
   609  	nodeKey *p2p.NodeKey,
   610  	p2pLogger log.Logger,
   611  ) *p2p.Switch {
   612  	sw := p2p.NewSwitch(
   613  		config.P2P,
   614  		transport,
   615  		p2p.WithMetrics(p2pMetrics),
   616  		p2p.SwitchPeerFilters(peerFilters...),
   617  	)
   618  	sw.SetLogger(p2pLogger)
   619  	sw.AddReactor("MEMPOOL", mempoolReactor)
   620  	sw.AddReactor("BLOCKCHAIN", bcReactor)
   621  	sw.AddReactor("CONSENSUS", consensusReactor)
   622  	sw.AddReactor("EVIDENCE", evidenceReactor)
   623  	sw.AddReactor("STATESYNC", stateSyncReactor)
   624  
   625  	sw.SetNodeInfo(nodeInfo)
   626  	sw.SetNodeKey(nodeKey)
   627  
   628  	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
   629  	return sw
   630  }
   631  
   632  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   633  	p2pLogger log.Logger, nodeKey *p2p.NodeKey,
   634  ) (pex.AddrBook, error) {
   635  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   636  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   637  
   638  	// Add ourselves to addrbook to prevent dialing ourselves
   639  	if config.P2P.ExternalAddress != "" {
   640  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
   641  		if err != nil {
   642  			return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
   643  		}
   644  		addrBook.AddOurAddress(addr)
   645  	}
   646  	if config.P2P.ListenAddress != "" {
   647  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
   648  		if err != nil {
   649  			return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
   650  		}
   651  		addrBook.AddOurAddress(addr)
   652  	}
   653  
   654  	sw.SetAddrBook(addrBook)
   655  
   656  	return addrBook, nil
   657  }
   658  
   659  func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
   660  	sw *p2p.Switch, logger log.Logger,
   661  ) *pex.Reactor {
   662  	// TODO persistent peers ? so we can have their DNS addrs saved
   663  	pexReactor := pex.NewReactor(addrBook,
   664  		config.P2P.RecvAsync,
   665  		&pex.ReactorConfig{
   666  			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
   667  			SeedMode: config.P2P.SeedMode,
   668  			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
   669  			// blocks assuming 10s blocks ~ 28 hours.
   670  			// TODO (melekes): make it dynamic based on the actual block latencies
   671  			// from the live network.
   672  			// https://github.com/tendermint/tendermint/issues/3523
   673  			SeedDisconnectWaitPeriod:     28 * time.Hour,
   674  			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
   675  			RecvBufSize:                  config.P2P.PexRecvBufSize,
   676  		})
   677  	pexReactor.SetLogger(logger.With("module", "pex"))
   678  	sw.AddReactor("PEX", pexReactor)
   679  	return pexReactor
   680  }
   681  
// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
//
// If stateProvider is nil, one is constructed from a light client using the
// RPC servers and trust options in config. The sync itself runs in a
// goroutine; any failure there is logged and the node is left un-synced —
// errors are only returned for setup problems.
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
	stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
	stateStore sm.Store, blockStore *store.BlockStore, state sm.State,
) error {
	ssR.Logger.Info("Starting state sync")

	if stateProvider == nil {
		var err error
		// Bound light-client setup (dialing the configured RPC servers) to 10s.
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		stateProvider, err = statesync.NewLightClientStateProvider(
			ctx,
			state.ChainID, state.Version, state.InitialHeight,
			config.RPCServers, light.TrustOptions{
				Period: config.TrustPeriod,
				Height: config.TrustHeight,
				Hash:   config.TrustHashBytes(),
			}, ssR.Logger.With("module", "light"))
		if err != nil {
			return fmt.Errorf("failed to set up light client state provider: %w", err)
		}
	}

	go func() {
		state, previousState, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime)
		if err != nil {
			ssR.Logger.Error("State sync failed", "err", err)
			return
		}
		// Bootstrap the previous state first when one exists.
		// NOTE(review): presumably needed so data referencing the prior height
		// can be validated after restore — confirm against ssR.Sync's contract.
		if previousState.LastBlockHeight > 0 {
			err = stateStore.Bootstrap(previousState)
			if err != nil {
				ssR.Logger.Error("Failed to bootstrap node with previous state", "err", err)
				return
			}
		}
		err = stateStore.Bootstrap(state)
		if err != nil {
			ssR.Logger.Error("Failed to bootstrap node with new state", "err", err)
			return
		}
		// Persist the commit for the restored height so the node can serve it.
		err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
		if err != nil {
			ssR.Logger.Error("Failed to store last seen commit", "err", err)
			return
		}

		if fastSync {
			// FIXME Very ugly to have these metrics bleed through here.
			conR.Metrics.StateSyncing.Set(0)
			conR.Metrics.FastSyncing.Set(1)
			err = bcR.SwitchToFastSync(state)
			if err != nil {
				ssR.Logger.Error("Failed to switch to fast sync", "err", err)
				return
			}
		} else {
			// No fast sync requested: hand the restored state straight to consensus.
			conR.SwitchToConsensus(state, true)
		}
	}()
	return nil
}
   745  
// NewNode returns a new, ready to go, Ostracon Node.
//
// It constructs and wires together every subsystem of the node (databases,
// ABCI connections, event bus, tx/block indexers, mempool, evidence pool,
// consensus, blockchain/state-sync reactors, and the p2p switch) but does
// not start them; the returned Node is started through its service.Service
// interface (see OnStart). Initialization order matters: the event bus and
// indexer must exist before the ABCI handshake so replayed blocks can be
// indexed, and the handshake must complete before the fast-sync decision
// because the app may change the validator set during replay.
func NewNode(config *cfg.Config,
	privValidator types.PrivValidator,
	nodeKey *p2p.NodeKey,
	clientCreator proxy.ClientCreator,
	genesisDocProvider GenesisDocProvider,
	dbProvider DBProvider,
	metricsProvider MetricsProvider,
	logger log.Logger,
	options ...Option,
) (*Node, error) {
	blockStore, stateDB, err := initDBs(config, dbProvider)
	if err != nil {
		return nil, err
	}

	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
	})

	// Load persisted state, falling back to the genesis document if the
	// database is fresh (the genesis doc is also cached in the state DB).
	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
	if err != nil {
		return nil, err
	}

	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
	if err != nil {
		return nil, err
	}

	// EventBus and IndexerService must be started before the handshake because
	// we might need to index the txs of the replayed block as this might not have happened
	// when the node stopped last time (i.e. the node stopped after it saved the block
	// but before it indexed the txs, or, endblocker panicked)
	eventBus, err := createAndStartEventBus(logger)
	if err != nil {
		return nil, err
	}

	indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(config,
		genDoc.ChainID, dbProvider, eventBus, logger)
	if err != nil {
		return nil, err
	}

	// If an address is provided, listen on the socket for a connection from an
	// external signing process.
	if strings.TrimSpace(config.PrivValidatorListenAddr) != "" {
		// FIXME: we should start services inside OnStart
		privValidator, err = CreateAndStartPrivValidatorSocketClient(config, genDoc.ChainID, logger)
		if err != nil {
			return nil, fmt.Errorf("error with private validator socket client: %w", err)
		}
	}

	pubKey, err := privValidator.GetPubKey()
	if err != nil {
		return nil, fmt.Errorf("can't get pubkey: %w", err)
	}

	// Determine whether we should attempt state sync.
	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
	if stateSync && state.LastBlockHeight > 0 {
		logger.Info("Found local state with non-zero height, skipping state sync")
		stateSync = false
	}

	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
	// and replays any blocks as necessary to sync ostracon with the app.
	consensusLogger := logger.With("module", "consensus")
	if !stateSync {
		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
			return nil, err
		}

		// Reload the state. It will have the Version.Consensus.App set by the
		// Handshake, and may have other modifications as well (ie. depending on
		// what happened during block replay).
		state, err = stateStore.Load()
		if err != nil {
			return nil, fmt.Errorf("cannot load state: %w", err)
		}
	}

	// Determine whether we should do fast sync. This must happen after the handshake, since the
	// app may modify the validator set, specifying ourself as the only validator.
	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)

	logNodeStartupInfo(state, pubKey, logger, consensusLogger)

	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)

	// Make MempoolReactor
	mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)

	// Make Evidence Reactor
	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
	if err != nil {
		return nil, err
	}

	// make block executor for consensus and blockchain reactors to execute blocks
	blockExec := sm.NewBlockExecutor(
		stateStore,
		logger.With("module", "state"),
		proxyApp.Consensus(),
		mempool,
		evidencePool,
		sm.BlockExecutorWithMetrics(smMetrics),
	)

	// Make BlockchainReactor. Don't start fast sync if we're doing a state sync first.
	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger)
	if err != nil {
		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
	}

	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first.
	// FIXME We need to update metrics here, since other reactors don't have access to them.
	if stateSync {
		csMetrics.StateSyncing.Set(1)
	} else if fastSync {
		csMetrics.FastSyncing.Set(1)
	}
	consensusReactor, consensusState := createConsensusReactor(
		config, state, blockExec, blockStore, mempool, evidencePool,
		privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger,
	)

	// Set up state sync reactor, and schedule a sync if requested.
	// FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy,
	// we should clean this whole thing up. See:
	// https://github.com/tendermint/tendermint/issues/4644
	stateSyncReactor := statesync.NewReactor(
		*config.StateSync,
		proxyApp.Snapshot(),
		proxyApp.Query(),
		config.P2P.RecvAsync,
		config.P2P.StatesyncRecvBufSize)
	stateSyncReactor.SetLogger(logger.With("module", "statesync"))

	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
	if err != nil {
		return nil, err
	}

	// Setup Transport.
	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)

	// Setup Switch.
	p2pLogger := logger.With("module", "p2p")
	sw := createSwitch(
		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
		stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
	)

	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
	}

	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
	if err != nil {
		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
	}

	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
	if err != nil {
		return nil, fmt.Errorf("could not create addrbook: %w", err)
	}

	// Optionally, start the pex reactor
	//
	// TODO:
	//
	// We need to set Seeds and PersistentPeers on the switch,
	// since it needs to be able to use these (and their DNS names)
	// even if the PEX is off. We can include the DNS name in the NetAddress,
	// but it would still be nice to have a clear list of the current "PersistentPeers"
	// somewhere that we can return with net_info.
	//
	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
	// Note we currently use the addrBook regardless at least for AddOurAddress
	var pexReactor *pex.Reactor
	if config.P2P.PexReactor {
		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
	}

	// Optionally expose the pprof debugging endpoints on a separate port.
	// The goroutine runs for the life of the process; serve errors are logged.
	if config.RPC.PprofListenAddress != "" {
		go func() {
			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
			//nolint:gosec,nolintlint // G114: Use of net/http serve function that has no support for setting timeouts
			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
		}()
	}

	node := &Node{
		config:        config,
		genesisDoc:    genDoc,
		privValidator: privValidator,

		transport: transport,
		sw:        sw,
		addrBook:  addrBook,
		nodeInfo:  nodeInfo,
		nodeKey:   nodeKey,

		stateStore:       stateStore,
		blockStore:       blockStore,
		bcReactor:        bcReactor,
		mempoolReactor:   mempoolReactor,
		mempool:          mempool,
		consensusState:   consensusState,
		consensusReactor: consensusReactor,
		stateSyncReactor: stateSyncReactor,
		stateSync:        stateSync,
		stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
		pexReactor:       pexReactor,
		evidencePool:     evidencePool,
		proxyApp:         proxyApp,
		txIndexer:        txIndexer,
		indexerService:   indexerService,
		blockIndexer:     blockIndexer,
		eventBus:         eventBus,
	}
	node.BaseService = *service.NewBaseService(logger, "Node", node)

	// Apply caller-supplied options (e.g. custom reactors) last, so they can
	// override anything configured above.
	for _, option := range options {
		option(node)
	}

	return node, nil
}
   980  
// OnStart starts the Node. It implements service.Service.
//
// Startup order: wait for genesis time if it lies in the future, start the
// RPC and (optionally) Prometheus servers, then the p2p transport and
// switch, dial persistent peers, and finally kick off state sync if it was
// requested at construction time.
func (n *Node) OnStart() error {
	now := tmtime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		// NOTE: this blocks Start until genesis time and cannot be
		// interrupted by Stop.
		time.Sleep(genTime.Sub(now))
	}

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		// Retained so OnStop can close them.
		n.rpcListeners = listeners
	}

	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport.
	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
	}

	// Run state sync. The blockchain reactor must support being handed
	// control after state sync completes (fastSyncReactor interface).
	if n.stateSync {
		bcR, ok := n.bcReactor.(fastSyncReactor)
		if !ok {
			return fmt.Errorf("this blockchain reactor does not support switching from state sync")
		}
		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
			n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
		if err != nil {
			return fmt.Errorf("failed to start state sync: %w", err)
		}
	}

	return nil
}
  1046  
// OnStop stops the Node. It implements service.Service.
//
// Shutdown proceeds roughly in reverse of startup: non-reactor services
// (event bus, indexer) first, then the p2p switch and transport, then the
// RPC listeners and external services (private validator client, Prometheus
// server), and finally the block and state stores. Errors are logged but do
// not abort the remaining shutdown steps.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	if err := n.eventBus.Stop(); err != nil {
		n.Logger.Error("Error closing eventBus", "err", err)
	}
	if err := n.indexerService.Stop(); err != nil {
		n.Logger.Error("Error closing indexerService", "err", err)
	}

	// now stop the reactors
	if err := n.sw.Stop(); err != nil {
		n.Logger.Error("Error closing switch", "err", err)
	}

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	// The private validator is only a Service when it is a socket client
	// (see CreateAndStartPrivValidatorSocketClient).
	if pvsc, ok := n.privValidator.(service.Service); ok {
		if err := pvsc.Stop(); err != nil {
			n.Logger.Error("Error closing private validator", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
	if n.blockStore != nil {
		if err := n.blockStore.Close(); err != nil {
			n.Logger.Error("problem closing blockstore", "err", err)
		}
	}
	if n.stateStore != nil {
		if err := n.stateStore.Close(); err != nil {
			n.Logger.Error("problem closing statestore", "err", err)
		}
	}
}
  1103  
// ConfigureRPC makes sure RPC has all the objects it needs to operate.
//
// It publishes the node's stores, reactors, pubkey and RPC config into the
// package-global rpccore environment, then pre-computes the genesis chunks
// used by the chunked-genesis RPC endpoint. It must run before startRPC
// serves any request.
func (n *Node) ConfigureRPC() error {
	pubKey, err := n.privValidator.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	}
	rpccore.SetEnvironment(&rpccore.Environment{
		ProxyAppQuery:   n.proxyApp.Query(),
		ProxyAppMempool: n.proxyApp.Mempool(),

		StateStore:     n.stateStore,
		BlockStore:     n.blockStore,
		EvidencePool:   n.evidencePool,
		ConsensusState: n.consensusState,
		P2PPeers:       n.sw,
		P2PTransport:   n,

		PubKey:           pubKey,
		GenDoc:           n.genesisDoc,
		TxIndexer:        n.txIndexer,
		BlockIndexer:     n.blockIndexer,
		ConsensusReactor: n.consensusReactor,
		EventBus:         n.eventBus,
		Mempool:          n.mempool,

		Logger: n.Logger.With("module", "rpc"),

		Config: *n.config.RPC,
	})
	if err := rpccore.InitGenesisChunks(); err != nil {
		return err
	}

	return nil
}
  1139  
// startRPC configures the RPC environment and starts a JSON-RPC/WebSocket
// HTTP server on every address in RPC.ListenAddress (comma separated; a mix
// of unix and tcp sockets is allowed), plus an optional gRPC server on
// RPC.GRPCListenAddress. It returns all listeners so OnStop can close them.
// Serving itself happens on background goroutines; serve-loop errors are
// logged rather than returned.
func (n *Node) startRPC() ([]net.Listener, error) {
	err := n.ConfigureRPC()
	if err != nil {
		return nil, err
	}

	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")

	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	config := rpcserver.DefaultConfig()
	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
	config.MaxBatchRequestNum = n.config.RPC.MaxBatchRequestNum
	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
	config.ReadTimeout = n.config.RPC.ReadTimeout
	config.WriteTimeout = n.config.RPC.WriteTimeout
	config.IdleTimeout = n.config.RPC.IdleTimeout
	// If necessary adjust global WriteTimeout to ensure it's greater than
	// TimeoutBroadcastTxCommit.
	// See https://github.com/tendermint/tendermint/issues/3435
	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wmLogger := rpcLogger.With("protocol", "websocket")
		// Drop all of a client's event subscriptions when its websocket
		// connection goes away, so the event bus doesn't leak subscribers.
		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
			rpcserver.OnDisconnect(func(remoteAddr string) {
				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
				}
			}),
			rpcserver.ReadLimit(config.MaxBodyBytes),
			rpcserver.WriteChanCapacity(n.config.RPC.WebSocketWriteBufferSize),
		)
		wm.SetLogger(wmLogger)
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
		listener, err := rpcserver.Listen(
			listenAddr,
			config,
		)
		if err != nil {
			return nil, err
		}

		// Optionally wrap the mux in CORS middleware based on config.
		var rootHandler http.Handler = mux
		if n.config.RPC.IsCorsEnabled() {
			corsMiddleware := cors.New(cors.Options{
				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
				AllowedMethods: n.config.RPC.CORSAllowedMethods,
				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
			})
			rootHandler = corsMiddleware.Handler(mux)
		}
		if n.config.RPC.IsTLSEnabled() {
			go func() {
				if err := rpcserver.ServeTLS(
					listener,
					rootHandler,
					n.config.RPC.CertFile(),
					n.config.RPC.KeyFile(),
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server with TLS", "err", err)
				}
			}()
		} else {
			go func() {
				if err := rpcserver.Serve(
					listener,
					rootHandler,
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server", "err", err)
				}
			}()
		}

		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		config := rpcserver.DefaultConfig()
		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
		config.MaxBatchRequestNum = n.config.RPC.MaxBatchRequestNum
		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
		// If necessary adjust global WriteTimeout to ensure it's greater than
		// TimeoutBroadcastTxCommit.
		// See https://github.com/tendermint/tendermint/issues/3435
		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
		}
		listener, err := rpcserver.Listen(grpcListenAddr, config)
		if err != nil {
			return nil, err
		}
		go func() {
			if err := grpccore.StartGRPCServer(listener); err != nil {
				n.Logger.Error("Error starting gRPC server", "err", err)
			}
		}()
		listeners = append(listeners, listener)

	}

	return listeners, nil
}
  1262  
  1263  // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
  1264  // collectors on addr.
  1265  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1266  	srv := &http.Server{
  1267  		Addr: addr,
  1268  		Handler: promhttp.InstrumentMetricHandler(
  1269  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1270  				prometheus.DefaultGatherer,
  1271  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1272  			),
  1273  		),
  1274  		ReadHeaderTimeout: readHeaderTimeout,
  1275  	}
  1276  	go func() {
  1277  		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
  1278  			// Error starting or closing listener:
  1279  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1280  		}
  1281  	}()
  1282  	return srv
  1283  }
  1284  
// Switch returns the Node's p2p Switch, which manages peer connections and
// routes messages between reactors.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}
  1289  
// BlockStore returns the Node's BlockStore, the persistent store of
// committed blocks.
func (n *Node) BlockStore() *store.BlockStore {
	return n.blockStore
}
  1294  
// ConsensusState returns the Node's ConsensusState, the consensus state
// machine driven by the consensus reactor.
func (n *Node) ConsensusState() *cs.State {
	return n.consensusState
}
  1299  
// ConsensusReactor returns the Node's ConsensusReactor, which relays
// consensus messages between this node and its peers.
func (n *Node) ConsensusReactor() *cs.Reactor {
	return n.consensusReactor
}
  1304  
// MempoolReactor returns the Node's mempool reactor as a generic
// p2p.Reactor (the concrete type depends on the configured mempool version).
func (n *Node) MempoolReactor() p2p.Reactor {
	return n.mempoolReactor
}
  1309  
// Mempool returns the Node's mempool, holding transactions pending
// inclusion in a block.
func (n *Node) Mempool() mempl.Mempool {
	return n.mempool
}
  1314  
// PEXReactor returns the Node's PEXReactor (peer exchange).
// It returns nil if PEX is disabled in the P2P config.
func (n *Node) PEXReactor() *pex.Reactor {
	return n.pexReactor
}
  1319  
// EvidencePool returns the Node's EvidencePool, which tracks evidence of
// validator misbehavior.
func (n *Node) EvidencePool() *evidence.Pool {
	return n.evidencePool
}
  1324  
// EventBus returns the Node's EventBus, the pubsub hub over which block,
// tx, and consensus events are published.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}
  1329  
// PrivValidator returns the Node's PrivValidator, the component that signs
// votes and proposals on this node's behalf.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}
  1335  
// GenesisDoc returns the Node's GenesisDoc (the chain's genesis document).
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}
  1340  
// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application
// (consensus, mempool, query, and snapshot connections).
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}
  1345  
// Config returns the Node's config as supplied to NewNode.
func (n *Node) Config() *cfg.Config {
	return n.config
}
  1350  
  1351  //------------------------------------------------------------------------------
  1352  
  1353  func (n *Node) Listeners() []string {
  1354  	return []string{
  1355  		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
  1356  	}
  1357  }
  1358  
// IsListening reports whether the p2p transport is currently listening for
// inbound peer connections (set in OnStart, cleared in OnStop).
func (n *Node) IsListening() bool {
	return n.isListening
}
  1362  
// NodeInfo returns the Node's Info from the Switch — the identity and
// capability record advertised to peers during the p2p handshake.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}
  1367  
// makeNodeInfo assembles the p2p.DefaultNodeInfo advertised to peers during
// the handshake: protocol versions, chain ID, supported channels (which
// depend on the configured fast-sync version and whether PEX is enabled),
// tx-indexer status, moniker, and the advertised listen address. It returns
// an error if the fast-sync version is unknown or the resulting info fails
// validation.
func makeNodeInfo(
	config *cfg.Config,
	nodeKey *p2p.NodeKey,
	txIndexer txindex.TxIndexer,
	genDoc *types.GenesisDoc,
	state sm.State,
) (p2p.DefaultNodeInfo, error) {
	// Advertise whether this node can answer tx queries.
	txIndexerStatus := "on"
	if _, ok := txIndexer.(*null.TxIndex); ok {
		txIndexerStatus = "off"
	}

	// Each fast-sync implementation speaks on its own p2p channel.
	var bcChannel byte
	switch config.FastSync.Version {
	case "v0":
		bcChannel = bcv0.BlockchainChannel
	case "v1":
		bcChannel = bcv1.BlockchainChannel
	case "v2":
		bcChannel = bcv2.BlockchainChannel
	default:
		return p2p.DefaultNodeInfo{}, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
	}

	nodeInfo := p2p.DefaultNodeInfo{
		ProtocolVersion: p2p.NewProtocolVersion(
			version.P2PProtocol, // global
			state.Version.Consensus.Block,
			state.Version.Consensus.App,
		),
		DefaultNodeID: nodeKey.ID(),
		Network:       genDoc.ChainID,
		Version:       version.OCCoreSemVer,
		Channels: []byte{
			bcChannel,
			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
			mempl.MempoolChannel,
			evidence.EvidenceChannel,
			statesync.SnapshotChannel, statesync.ChunkChannel,
		},
		Moniker: config.Moniker,
		Other: p2p.DefaultNodeInfoOther{
			TxIndex:    txIndexerStatus,
			RPCAddress: config.RPC.ListenAddress,
		},
	}

	if config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	// Prefer the explicitly configured external address; fall back to the
	// listen address.
	lAddr := config.P2P.ExternalAddress

	if lAddr == "" {
		lAddr = config.P2P.ListenAddress
	}

	nodeInfo.ListenAddr = lAddr

	err := nodeInfo.Validate()
	return nodeInfo, err
}
  1430  
  1431  //------------------------------------------------------------------------------
  1432  
  1433  var genesisDocKey = []byte("genesisDoc")
  1434  
// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
// database, or creates one using the given genesisDocProvider. On success this also
// returns the genesis doc loaded through the given provider.
//
// The genesis doc itself is cached in the state DB on first load, both to
// guard against a later, accidentally modified genesis file and as an audit
// trail.
func LoadStateFromDBOrGenesisDocProvider(
	stateDB dbm.DB,
	genesisDocProvider GenesisDocProvider,
) (sm.State, *types.GenesisDoc, error) {
	// Get genesis doc
	genDoc, err := loadGenesisDoc(stateDB)
	if err != nil {
		// Not in the DB yet: ask the provider (typically reads the genesis
		// file from disk).
		genDoc, err = genesisDocProvider()
		if err != nil {
			return sm.State{}, nil, err
		}
		// save genesis doc to prevent a certain class of user errors (e.g. when it
		// was changed, accidentally or not). Also good for audit trail.
		if err := saveGenesisDoc(stateDB, genDoc); err != nil {
			return sm.State{}, nil, err
		}
	}
	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: false,
	})
	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
	if err != nil {
		return sm.State{}, nil, err
	}
	return state, genDoc, nil
}
  1464  
// loadGenesisDoc reads the genesis document previously persisted by
// saveGenesisDoc. The document is stored in chunks under keys
// genesisDocKey||0x00, genesisDocKey||0x01, ..., so the chunks are
// reassembled by iterating the key range [prefix||0x00, prefix||0xff) in
// order. Returns an error if no chunks are found.
// panics if failed to unmarshal bytes
func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
	var b []byte
	// NOTE(review): the end bound is exclusive, so a chunk keyed with
	// suffix 0xff would be skipped — only relevant for genesis docs larger
	// than ~25.5GB; confirm against saveGenesisDoc's chunk size.
	iter, err := db.Iterator(append(genesisDocKey, byte(0)), append(genesisDocKey, byte(255)))
	if err != nil {
		return nil, err
	}

	// Concatenate all chunk values in key order.
	for ; iter.Valid(); iter.Next() {
		b = append(b, iter.Value()...)
	}
	// NOTE(review): mid-iteration errors are not checked via iter.Error();
	// relies on Close surfacing problems — confirm against the tm-db backend.
	if err = iter.Close(); err != nil {
		return nil, err
	}

	if len(b) == 0 {
		return nil, errors.New("genesis doc not found")
	}

	var genDoc *types.GenesisDoc
	err = tmjson.Unmarshal(b, &genDoc)
	if err != nil {
		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
	}
	return genDoc, nil
}
  1491  
  1492  // panics if failed to marshal the given genesis document
  1493  func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error {
  1494  	b, err := tmjson.Marshal(genDoc)
  1495  	if err != nil {
  1496  		return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err)
  1497  	}
  1498  
  1499  	blockSize := 100000000 // 100mb
  1500  	blocks := make([][]byte, 0)
  1501  	for i := 0; i < len(b); i += blockSize {
  1502  		end := i + blockSize
  1503  		if end > len(b) {
  1504  			end = len(b)
  1505  		}
  1506  		blocks = append(blocks, b[i:end])
  1507  	}
  1508  
  1509  	batch := db.NewBatch()
  1510  	for i, block := range blocks {
  1511  		k := append(genesisDocKey, byte(i))
  1512  		err = batch.Set(k, block)
  1513  		if err != nil {
  1514  			return err
  1515  		}
  1516  	}
  1517  
  1518  	if err = batch.WriteSync(); err != nil {
  1519  		return err
  1520  	}
  1521  
  1522  	return nil
  1523  }
  1524  
// CreateAndStartPrivValidatorSocketClient starts a signer listener on the
// configured PrivValidatorListenAddr, wraps it in a signer client for the
// given chain ID, verifies connectivity by fetching the public key once, and
// returns the client wrapped with retry behavior. The returned value also
// implements service.Service, which OnStop relies on to shut it down.
func CreateAndStartPrivValidatorSocketClient(config *cfg.Config, chainID string, logger log.Logger) (types.PrivValidator, error) {
	pve, err := privval.NewSignerListener(logger, config.PrivValidatorListenAddr, config.PrivValidatorRemoteAddresses)
	if err != nil {
		return nil, fmt.Errorf("failed to start private validator: %w", err)
	}

	pvsc, err := privval.NewSignerClient(pve, chainID)
	if err != nil {
		return nil, fmt.Errorf("failed to start private validator: %w", err)
	}

	// try to get a pubkey from private validate first time
	_, err = pvsc.GetPubKey()
	if err != nil {
		return nil, fmt.Errorf("can't get pubkey: %w", err)
	}

	// Retry transient signer failures before giving up.
	const (
		retries = 50 // 50 * 100ms = 5s total
		timeout = 100 * time.Millisecond
	)
	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)

	return pvscWithRetries, nil
}
  1550  
  1551  // splitAndTrimEmpty slices s into all subslices separated by sep and returns a
  1552  // slice of the string s with all leading and trailing Unicode code points
  1553  // contained in cutset removed. If sep is empty, SplitAndTrim splits after each
  1554  // UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
  1555  // -1.  also filter out empty strings, only return non-empty strings.
  1556  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1557  	if s == "" {
  1558  		return []string{}
  1559  	}
  1560  
  1561  	spl := strings.Split(s, sep)
  1562  	nonEmptyStrings := make([]string, 0, len(spl))
  1563  	for i := 0; i < len(spl); i++ {
  1564  		element := strings.Trim(spl[i], cutset)
  1565  		if element != "" {
  1566  			nonEmptyStrings = append(nonEmptyStrings, element)
  1567  		}
  1568  	}
  1569  	return nonEmptyStrings
  1570  }