github.com/okex/exchain@v1.8.0/libs/tendermint/node/node.go

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"fmt"
     7  	"net"
     8  	"net/http"
     9  	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
    10  	"strings"
    11  	"time"
    12  
    13  	blockindex "github.com/okex/exchain/libs/tendermint/state/indexer"
    14  	blockindexnull "github.com/okex/exchain/libs/tendermint/state/indexer/block/null"
    15  
    16  	"github.com/okex/exchain/libs/tendermint/global"
    17  
    18  	"github.com/pkg/errors"
    19  	"github.com/prometheus/client_golang/prometheus"
    20  	"github.com/prometheus/client_golang/prometheus/promhttp"
    21  	"github.com/rs/cors"
    22  
    23  	amino "github.com/tendermint/go-amino"
    24  
    25  	dbm "github.com/okex/exchain/libs/tm-db"
    26  
    27  	sdk "github.com/okex/exchain/libs/cosmos-sdk/types"
    28  	abci "github.com/okex/exchain/libs/tendermint/abci/types"
    29  	bcv0 "github.com/okex/exchain/libs/tendermint/blockchain/v0"
    30  	bcv1 "github.com/okex/exchain/libs/tendermint/blockchain/v1"
    31  	bcv2 "github.com/okex/exchain/libs/tendermint/blockchain/v2"
    32  	cfg "github.com/okex/exchain/libs/tendermint/config"
    33  	"github.com/okex/exchain/libs/tendermint/consensus"
    34  	cs "github.com/okex/exchain/libs/tendermint/consensus"
    35  	"github.com/okex/exchain/libs/tendermint/crypto"
    36  	"github.com/okex/exchain/libs/tendermint/evidence"
    37  	"github.com/okex/exchain/libs/tendermint/libs/log"
    38  	tmpubsub "github.com/okex/exchain/libs/tendermint/libs/pubsub"
    39  	"github.com/okex/exchain/libs/tendermint/libs/service"
    40  	mempl "github.com/okex/exchain/libs/tendermint/mempool"
    41  	"github.com/okex/exchain/libs/tendermint/p2p"
    42  	"github.com/okex/exchain/libs/tendermint/p2p/pex"
    43  	"github.com/okex/exchain/libs/tendermint/privval"
    44  	"github.com/okex/exchain/libs/tendermint/proxy"
    45  	rpccore "github.com/okex/exchain/libs/tendermint/rpc/core"
    46  	ctypes "github.com/okex/exchain/libs/tendermint/rpc/core/types"
    47  	grpccore "github.com/okex/exchain/libs/tendermint/rpc/grpc"
    48  	rpcserver "github.com/okex/exchain/libs/tendermint/rpc/jsonrpc/server"
    49  	sm "github.com/okex/exchain/libs/tendermint/state"
    50  	blockindexer "github.com/okex/exchain/libs/tendermint/state/indexer/block/kv"
    51  	"github.com/okex/exchain/libs/tendermint/state/txindex"
    52  	"github.com/okex/exchain/libs/tendermint/state/txindex/kv"
    53  	"github.com/okex/exchain/libs/tendermint/state/txindex/null"
    54  	"github.com/okex/exchain/libs/tendermint/store"
    55  	"github.com/okex/exchain/libs/tendermint/types"
    56  	tmtime "github.com/okex/exchain/libs/tendermint/types/time"
    57  	"github.com/okex/exchain/libs/tendermint/version"
    58  )
    59  
    60  //------------------------------------------------------------------------------
    61  
    62  // DBContext specifies config information for loading a new DB.
    63  type DBContext struct {
    64  	ID     string
    65  	Config *cfg.Config
    66  }
    67  
    68  // DBProvider takes a DBContext and returns an instantiated DB.
    69  type DBProvider func(*DBContext) (dbm.DB, error)
    70  
    71  // DefaultDBProvider returns a database using the DBBackend and DBDir
    72  // specified in the ctx.Config.
    73  func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
    74  	dbType := dbm.BackendType(ctx.Config.DBBackend)
    75  	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()), nil
    76  }
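        // Editor's illustrative sketch, not part of the original source: a DBProvider
        // is just a function, so callers can swap in e.g. an in-memory provider for
        // tests. Assumes the tm-db fork used here exposes dbm.NewMemDB, as upstream
        // tm-db does; the name inMemoryDBProvider is hypothetical.
        var inMemoryDBProvider DBProvider = func(ctx *DBContext) (dbm.DB, error) {
        	// Every requested DB ("blockstore", "state", "tx_index", ...) lives only
        	// in memory, so nothing is persisted between runs.
        	return dbm.NewMemDB(), nil
        }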
    77  
    78  // GenesisDocProvider returns a GenesisDoc.
    79  // It allows the GenesisDoc to be pulled from sources other than the
    80  // filesystem, for instance from a distributed key-value store cluster.
    81  type GenesisDocProvider func() (*types.GenesisDoc, error)
    82  
    83  // DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
    84  // the GenesisDoc from the config.GenesisFile() on the filesystem.
    85  func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
    86  	return func() (*types.GenesisDoc, error) {
    87  		return types.GenesisDocFromFile(config.GenesisFile())
    88  	}
    89  }
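        // Editor's illustrative sketch, not part of the original source: a custom
        // GenesisDocProvider that serves a GenesisDoc already held in memory, e.g.
        // one fetched from a remote configuration service before the node starts.
        // The name inMemoryGenesisDocProvider is hypothetical.
        func inMemoryGenesisDocProvider(genDoc *types.GenesisDoc) GenesisDocProvider {
        	return func() (*types.GenesisDoc, error) {
        		if genDoc == nil {
        			return nil, errors.New("no genesis doc supplied")
        		}
        		return genDoc, nil
        	}
        }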
    90  
    91  // Provider takes a config and a logger and returns a ready to go Node.
    92  type Provider func(*cfg.Config, log.Logger) (*Node, error)
    93  
    94  // DefaultNewNode returns a Tendermint node with default settings for the
    95  // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
    96  // It implements Provider.
    97  func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
    98  	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
    99  	if err != nil {
   100  		return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
   101  	}
   102  
   103  	return NewNode(config,
   104  		privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
   105  		nodeKey,
   106  		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
   107  		DefaultGenesisDocProviderFunc(config),
   108  		DefaultDBProvider,
   109  		DefaultMetricsProvider(config.Instrumentation),
   110  		logger,
   111  	)
   112  }
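        // Editor's illustrative sketch, not part of the original source: the typical
        // call sequence around DefaultNewNode. Assumes a fully populated *cfg.Config
        // whose genesis file exists on disk (missing node and validator key files are
        // generated by the default providers). The name runDefaultNode is hypothetical.
        func runDefaultNode(config *cfg.Config, logger log.Logger) (*Node, error) {
        	node, err := DefaultNewNode(config, logger)
        	if err != nil {
        		return nil, errors.Wrap(err, "failed to create node")
        	}
        	// Start drives Node.OnStart via the embedded BaseService.
        	if err := node.Start(); err != nil {
        		return nil, errors.Wrap(err, "failed to start node")
        	}
        	return node, nil
        }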
   113  
   114  // MetricsProvider returns the consensus, p2p, mempool and state Metrics.
   115  type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
   116  
   117  // DefaultMetricsProvider returns Metrics built using the Prometheus client
   118  // library if Prometheus is enabled. Otherwise, it returns no-op Metrics.
   119  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   120  	return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
   121  		if config.Prometheus {
   122  			return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   123  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   124  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   125  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   126  		}
   127  		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
   128  	}
   129  }
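        // Editor's illustrative sketch, not part of the original source: building a
        // Prometheus-enabled MetricsProvider. Assumes cfg.DefaultInstrumentationConfig
        // is available, as in upstream Tendermint; with Prometheus disabled the same
        // provider would hand back no-op metrics instead. The name
        // prometheusMetricsProvider is hypothetical.
        func prometheusMetricsProvider(namespace string) MetricsProvider {
        	instr := cfg.DefaultInstrumentationConfig()
        	instr.Prometheus = true
        	instr.Namespace = namespace
        	return DefaultMetricsProvider(instr)
        }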
   130  
   131  // Option sets a parameter for the node.
   132  type Option func(*Node)
   133  
   134  // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
   135  // the node's Switch.
   136  //
   137  // WARNING: using any name from the list of existing reactors below will
   138  // replace that reactor with the custom one.
   139  //
   140  //  - MEMPOOL
   141  //  - BLOCKCHAIN
   142  //  - CONSENSUS
   143  //  - EVIDENCE
   144  //  - PEX
   145  func CustomReactors(reactors map[string]p2p.Reactor) Option {
   146  	return func(n *Node) {
   147  		for name, reactor := range reactors {
   148  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   149  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   150  					"name", name, "existing", existingReactor, "custom", reactor)
   151  				n.sw.RemoveReactor(name, existingReactor)
   152  			}
   153  			n.sw.AddReactor(name, reactor)
   154  		}
   155  	}
   156  }
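        // Editor's illustrative sketch, not part of the original source: CustomReactors
        // is passed to NewNode as an Option; reusing a reserved name such as "MEMPOOL"
        // replaces the built-in reactor, while any other name adds a new one. The name
        // withCustomMempoolReactor is hypothetical.
        func withCustomMempoolReactor(custom p2p.Reactor) Option {
        	return CustomReactors(map[string]p2p.Reactor{
        		"MEMPOOL": custom,
        	})
        }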
   157  
   158  //------------------------------------------------------------------------------
   159  
   160  // Node is the highest level interface to a full Tendermint node.
   161  // It includes all configuration information and running services.
   162  type Node struct {
   163  	service.BaseService
   164  
   165  	// config
   166  	config        *cfg.Config
   167  	genesisDoc    *types.GenesisDoc   // initial validator set
   168  	privValidator types.PrivValidator // local node's validator key
   169  
   170  	// network
   171  	transport   *p2p.MultiplexTransport
   172  	sw          *p2p.Switch  // p2p connections
   173  	addrBook    pex.AddrBook // known peers
   174  	nodeInfo    p2p.NodeInfo
   175  	nodeKey     *p2p.NodeKey // our node privkey
   176  	isListening bool
   177  
   178  	// services
   179  	eventBus         *types.EventBus // pub/sub for services
   180  	stateDB          dbm.DB
   181  	blockStore       *store.BlockStore // store the blockchain to disk
   182  	bcReactor        p2p.Reactor       // for fast-syncing
   183  	mempoolReactor   *mempl.Reactor    // for gossiping transactions
   184  	mempool          mempl.Mempool
   185  	consensusState   *cs.State      // latest consensus state
   186  	consensusReactor *cs.Reactor    // for participating in the consensus
   187  	pexReactor       *pex.Reactor   // for exchanging peer addresses
   188  	evidencePool     *evidence.Pool // tracking evidence
   189  	proxyApp         proxy.AppConns // connection to the application
   190  	rpcListeners     []net.Listener // rpc servers
   191  	txIndexer        txindex.TxIndexer
   192  	indexerService   *txindex.IndexerService
   193  	prometheusSrv    *http.Server
   194  
   195  	//blockExec
   196  	blockExec *sm.BlockExecutor
   197  }
   198  
   199  func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
   200  	var blockStoreDB dbm.DB
   201  	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
   202  	if err != nil {
   203  		return
   204  	}
   205  	blockStore = store.NewBlockStore(blockStoreDB)
   206  
   207  	stateDB, err = dbProvider(&DBContext{"state", config})
   208  	if err != nil {
   209  		return
   210  	}
   211  
   212  	return
   213  }
   214  
   215  func initBlockStore(dataDir string) (blockStore *store.BlockStore, err error) {
   216  	var blockStoreDB dbm.DB
   217  	blockStoreDB, err = sdk.NewDB("blockstore", dataDir)
   218  	if err != nil {
   219  		return
   220  	}
   221  	blockStore = store.NewBlockStore(blockStoreDB)
   222  
   223  	return
   224  }
   225  
   226  func initTxDB(dataDir string) (txDB dbm.DB, err error) {
   227  	txDB, err = sdk.NewDB("tx_index", dataDir)
   228  	if err != nil {
   229  		return
   230  	}
   231  
   232  	return
   233  }
   234  func initBlockIndexDB(dataDir string) (blockIndexDB dbm.DB, err error) {
   235  	blockIndexDB, err = sdk.NewDB("block_index", dataDir)
   236  	if err != nil {
   237  		return
   238  	}
   239  
   240  	return
   241  }
   242  func initBlockEventTxDB(blockIndexDb dbm.DB) dbm.DB {
   243  	return dbm.NewPrefixDB(blockIndexDb, []byte("block_events"))
   244  }
   245  
   246  func initStateDB(config *cfg.Config) (stateDB dbm.DB, err error) {
   247  	stateDB, err = sdk.NewDB("state", config.DBDir())
   248  	if err != nil {
   249  		return
   250  	}
   251  
   252  	return
   253  }
   254  
   255  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
   256  	proxyApp := proxy.NewAppConns(clientCreator)
   257  	proxyApp.SetLogger(logger.With("module", "proxy"))
   258  	if err := proxyApp.Start(); err != nil {
   259  		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
   260  	}
   261  	return proxyApp, nil
   262  }
   263  
   264  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   265  	eventBus := types.NewEventBus()
   266  	eventBus.SetLogger(logger.With("module", "events"))
   267  	if err := eventBus.Start(); err != nil {
   268  		return nil, err
   269  	}
   270  	return eventBus, nil
   271  }
   272  
   273  func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider,
   274  	eventBus *types.EventBus, logger log.Logger) (*txindex.IndexerService, txindex.TxIndexer, error) {
   275  
   276  	var txIndexer txindex.TxIndexer
   277  	var blockIndexer blockindex.BlockIndexer
   278  	switch config.TxIndex.Indexer {
   279  	case "kv":
   280  		store, err := dbProvider(&DBContext{"tx_index", config})
   281  		if err != nil {
   282  			return nil, nil, err
   283  		}
   284  		switch {
   285  		case config.TxIndex.IndexKeys != "":
   286  			txIndexer = kv.NewTxIndex(store, kv.IndexEvents(splitAndTrimEmpty(config.TxIndex.IndexKeys, ",", " ")))
   287  		case config.TxIndex.IndexAllKeys:
   288  			txIndexer = kv.NewTxIndex(store, kv.IndexAllEvents())
   289  		default:
   290  			txIndexer = kv.NewTxIndex(store)
   291  		}
   292  		blockIndexStore, err := dbProvider(&DBContext{"block_index", config})
   293  		if err != nil {
   294  			return nil, nil, err
   295  		}
   296  		blockIndexer = blockindexer.New(dbm.NewPrefixDB(blockIndexStore, []byte("block_events")))
   297  	default:
   298  		txIndexer = &null.TxIndex{}
   299  		blockIndexer = &blockindexnull.BlockerIndexer{}
   300  	}
   301  
   302  	indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus)
   303  	indexerService.SetLogger(logger.With("module", "txindex"))
   304  	if err := indexerService.Start(); err != nil {
   305  		return nil, nil, err
   306  	}
   307  	return indexerService, txIndexer, nil
   308  }
   309  
   310  func doHandshake(
   311  	stateDB dbm.DB,
   312  	state sm.State,
   313  	blockStore sm.BlockStore,
   314  	genDoc *types.GenesisDoc,
   315  	eventBus types.BlockEventPublisher,
   316  	proxyApp proxy.AppConns,
   317  	consensusLogger log.Logger) error {
   318  
   319  	handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
   320  	handshaker.SetLogger(consensusLogger)
   321  	handshaker.SetEventBus(eventBus)
   322  	if err := handshaker.Handshake(proxyApp); err != nil {
   323  		return fmt.Errorf("error during handshake: %v", err)
   324  	}
   325  	return nil
   326  }
   327  
   328  func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
   329  	// Log the version info.
   330  	logger.Info("Version info",
   331  		"software", version.TMCoreSemVer,
   332  		"block", version.BlockProtocol,
   333  		"p2p", version.P2PProtocol,
   334  	)
   335  
   336  	// If the state and software differ in block version, at least log it.
   337  	if state.Version.Consensus.Block != version.BlockProtocol {
   338  		logger.Info("Software and state have different block protocols",
   339  			"software", version.BlockProtocol,
   340  			"state", state.Version.Consensus.Block,
   341  		)
   342  	}
   343  
   344  	addr := pubKey.Address()
   345  	// Log whether this node is a validator or an observer
   346  	if state.Validators.HasAddress(addr) {
   347  		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   348  	} else {
   349  		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   350  	}
   351  }
   352  
   353  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   354  	if state.Validators.Size() > 1 {
   355  		return false
   356  	}
   357  	addr, _ := state.Validators.GetByIndex(0)
   358  	return bytes.Equal(pubKey.Address(), addr)
   359  }
   360  
   361  func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
   362  	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
   363  
   364  	mempool := mempl.NewCListMempool(
   365  		config.Mempool,
   366  		proxyApp.Mempool(),
   367  		state.LastBlockHeight,
   368  		mempl.WithMetrics(memplMetrics),
   369  		mempl.WithPreCheck(sm.TxPreCheck(state)),
   370  		mempl.WithPostCheck(sm.TxPostCheck(state)),
   371  	)
   372  	mempoolLogger := logger.With("module", "mempool")
   373  	mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
   374  	mempoolReactor.SetLogger(mempoolLogger)
   375  
   376  	if config.Consensus.WaitForTxs() {
   377  		mempool.EnableTxsAvailable()
   378  	}
   379  	return mempoolReactor, mempool
   380  }
   381  
   382  func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   383  	stateDB dbm.DB, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) {
   384  
   385  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   386  	if err != nil {
   387  		return nil, nil, err
   388  	}
   389  	evidenceLogger := logger.With("module", "evidence")
   390  	evidencePool := evidence.NewPool(stateDB, evidenceDB)
   391  	evidencePool.SetLogger(evidenceLogger)
   392  	evidenceReactor := evidence.NewReactor(evidencePool)
   393  	evidenceReactor.SetLogger(evidenceLogger)
   394  	return evidenceReactor, evidencePool, nil
   395  }
   396  
   397  func createBlockchainReactor(config *cfg.Config,
   398  	state sm.State,
   399  	blockExec *sm.BlockExecutor,
   400  	blockStore *store.BlockStore,
   401  	fastSync bool,
   402  	logger log.Logger) (bcReactor p2p.Reactor, err error) {
   403  
   404  	switch config.FastSync.Version {
   405  	case "v0":
   406  		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   407  	case "v1":
   408  		bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   409  	case "v2":
   410  		bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   411  	default:
   412  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
   413  	}
   414  
   415  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   416  	return bcReactor, nil
   417  }
   418  
   419  func createConsensusReactor(config *cfg.Config,
   420  	state sm.State,
   421  	blockExec *sm.BlockExecutor,
   422  	blockStore sm.BlockStore,
   423  	mempool *mempl.CListMempool,
   424  	evidencePool *evidence.Pool,
   425  	privValidator types.PrivValidator,
   426  	csMetrics *cs.Metrics,
   427  	fastSync bool,
   428  	autoFastSync bool,
   429  	eventBus *types.EventBus,
   430  	consensusLogger log.Logger) (*consensus.Reactor, *consensus.State) {
   431  
   432  	consensusState := cs.NewState(
   433  		config.Consensus,
   434  		state.Copy(),
   435  		blockExec,
   436  		blockStore,
   437  		mempool,
   438  		evidencePool,
   439  		cs.StateMetrics(csMetrics),
   440  	)
   441  	consensusState.SetLogger(consensusLogger)
   442  	if privValidator != nil {
   443  		consensusState.SetPrivValidator(privValidator)
   444  	}
   445  	consensusReactor := cs.NewReactor(consensusState, fastSync, autoFastSync, cs.ReactorMetrics(csMetrics))
   446  	consensusReactor.SetLogger(consensusLogger)
   447  	// services which will be publishing and/or subscribing for messages (events)
   448  	// consensusReactor will set it on consensusState and blockExecutor
   449  	if eventBus != nil {
   450  		consensusReactor.SetEventBus(eventBus)
   451  	}
   452  	return consensusReactor, consensusState
   453  }
   454  
   455  func createTransport(
   456  	config *cfg.Config,
   457  	nodeInfo p2p.NodeInfo,
   458  	nodeKey *p2p.NodeKey,
   459  	proxyApp proxy.AppConns,
   460  ) (
   461  	*p2p.MultiplexTransport,
   462  	[]p2p.PeerFilterFunc,
   463  ) {
   464  	var (
   465  		mConnConfig = p2p.MConnConfig(config.P2P)
   466  		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
   467  		connFilters = []p2p.ConnFilterFunc{}
   468  		peerFilters = []p2p.PeerFilterFunc{}
   469  	)
   470  
   471  	if !config.P2P.AllowDuplicateIP {
   472  		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
   473  	}
   474  
   475  	// Filter peers by addr or pubkey with an ABCI query.
   476  	// If the query return code is OK, add peer.
   477  	if config.FilterPeers {
   478  		connFilters = append(
   479  			connFilters,
   480  			// ABCI query for address filtering.
   481  			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
   482  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   483  					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
   484  				})
   485  				if err != nil {
   486  					return err
   487  				}
   488  				if res.IsErr() {
   489  					return fmt.Errorf("error querying abci app: %v", res)
   490  				}
   491  
   492  				return nil
   493  			},
   494  		)
   495  
   496  		peerFilters = append(
   497  			peerFilters,
   498  			// ABCI query for ID filtering.
   499  			func(_ p2p.IPeerSet, p p2p.Peer) error {
   500  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   501  					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
   502  				})
   503  				if err != nil {
   504  					return err
   505  				}
   506  				if res.IsErr() {
   507  					return fmt.Errorf("error querying abci app: %v", res)
   508  				}
   509  
   510  				return nil
   511  			},
   512  		)
   513  	}
   514  
   515  	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
   516  
   517  	// Limit the number of incoming connections.
   518  	max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   519  	p2p.MultiplexTransportMaxIncomingConnections(max)(transport)
   520  
   521  	return transport, peerFilters
   522  }
   523  
   524  func createSwitch(config *cfg.Config,
   525  	transport p2p.Transport,
   526  	p2pMetrics *p2p.Metrics,
   527  	peerFilters []p2p.PeerFilterFunc,
   528  	mempoolReactor *mempl.Reactor,
   529  	bcReactor p2p.Reactor,
   530  	consensusReactor *consensus.Reactor,
   531  	evidenceReactor *evidence.Reactor,
   532  	nodeInfo p2p.NodeInfo,
   533  	nodeKey *p2p.NodeKey,
   534  	p2pLogger log.Logger) *p2p.Switch {
   535  
   536  	sw := p2p.NewSwitch(
   537  		config.P2P,
   538  		transport,
   539  		p2p.WithMetrics(p2pMetrics),
   540  		p2p.SwitchPeerFilters(peerFilters...),
   541  	)
   542  	sw.SetLogger(p2pLogger)
   543  	sw.AddReactor("MEMPOOL", mempoolReactor)
   544  	sw.AddReactor("BLOCKCHAIN", bcReactor)
   545  	sw.AddReactor("CONSENSUS", consensusReactor)
   546  	sw.AddReactor("EVIDENCE", evidenceReactor)
   547  
   548  	sw.SetNodeInfo(nodeInfo)
   549  	sw.SetNodeKey(nodeKey)
   550  
   551  	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
   552  	return sw
   553  }
   554  
   555  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   556  	p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) {
   557  
   558  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   559  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   560  
   561  	// Add ourselves to addrbook to prevent dialing ourselves
   562  	if config.P2P.ExternalAddress != "" {
   563  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
   564  		if err != nil {
   565  			return nil, errors.Wrap(err, "p2p.external_address is incorrect")
   566  		}
   567  		addrBook.AddOurAddress(addr)
   568  	}
   569  	if config.P2P.ListenAddress != "" {
   570  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
   571  		if err != nil {
   572  			return nil, errors.Wrap(err, "p2p.laddr is incorrect")
   573  		}
   574  		addrBook.AddOurAddress(addr)
   575  	}
   576  
   577  	sw.SetAddrBook(addrBook)
   578  
   579  	return addrBook, nil
   580  }
   581  
   582  func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
   583  	sw *p2p.Switch, logger log.Logger) *pex.Reactor {
   584  
   585  	// TODO persistent peers ? so we can have their DNS addrs saved
   586  	pexReactor := pex.NewReactor(addrBook,
   587  		&pex.ReactorConfig{
   588  			Seeds:    splitAndTrimEmpty(setDefaultSeeds(config.P2P.Seeds), ",", " "),
   589  			SeedMode: config.P2P.SeedMode,
   590  			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
   591  			// blocks assuming 10s blocks ~ 28 hours.
   592  			// TODO (melekes): make it dynamic based on the actual block latencies
   593  			// from the live network.
   594  			// https://github.com/okex/exchain/libs/tendermint/issues/3523
   595  			SeedDisconnectWaitPeriod:     28 * time.Hour,
   596  			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
   597  		})
   598  	pexReactor.SetLogger(logger.With("module", "pex"))
   599  	sw.AddReactor("PEX", pexReactor)
   600  	return pexReactor
   601  }
   602  
   603  // NewNode returns a new, ready to go, Tendermint Node.
   604  func NewNode(config *cfg.Config,
   605  	privValidator types.PrivValidator,
   606  	nodeKey *p2p.NodeKey,
   607  	clientCreator proxy.ClientCreator,
   608  	genesisDocProvider GenesisDocProvider,
   609  	dbProvider DBProvider,
   610  	metricsProvider MetricsProvider,
   611  	logger log.Logger,
   612  	options ...Option) (*Node, error) {
   613  
   614  	blockStore, stateDB, err := initDBs(config, dbProvider)
   615  	if err != nil {
   616  		return nil, err
   617  	}
   618  
   619  	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
   620  	if err != nil {
   621  		return nil, err
   622  	}
   623  
   624  	global.SetGlobalHeight(state.LastBlockHeight)
   625  
   626  	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
   627  	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
   628  	if err != nil {
   629  		return nil, err
   630  	}
   631  
   632  	// EventBus and IndexerService must be started before the handshake because
   633  	// we might need to index the txs of the replayed block, which may not have
   634  	// happened when the node last stopped (i.e. the node stopped after saving the
   635  	// block but before indexing its txs, or the EndBlocker panicked).
   636  	eventBus, err := createAndStartEventBus(logger)
   637  	if err != nil {
   638  		return nil, err
   639  	}
   640  
   641  	// Transaction indexing
   642  	indexerService, txIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger)
   643  	if err != nil {
   644  		return nil, err
   645  	}
   646  
   647  	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
   648  	// and replays any blocks as necessary to sync tendermint with the app.
   649  	consensusLogger := logger.With("module", "consensus")
   650  	if err := doHandshake(stateDB, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
   651  		return nil, err
   652  	}
   653  
   654  	// Reload the state. It will have the Version.Consensus.App set by the
   655  	// Handshake, and may have other modifications as well (ie. depending on
   656  	// what happened during block replay).
   657  	state = sm.LoadState(stateDB)
   658  
   659  	// If an address is provided, listen on the socket for a connection from an
   660  	// external signing process.
   661  	if config.PrivValidatorListenAddr != "" {
   662  		// FIXME: we should start services inside OnStart
   663  		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, logger)
   664  		if err != nil {
   665  			return nil, errors.Wrap(err, "error with private validator socket client")
   666  		}
   667  	}
   668  
   669  	pubKey, err := privValidator.GetPubKey()
   670  	if err != nil {
   671  		return nil, errors.Wrap(err, "can't get pubkey")
   672  	}
   673  
   674  	logNodeStartupInfo(state, pubKey, logger, consensusLogger)
   675  
   676  	// Decide whether to fast-sync or not
   677  	// We don't fast-sync when the only validator is us.
   678  	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
   679  	autoFastSync := config.AutoFastSync
   680  
   681  	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
   682  
   683  	// Make MempoolReactor
   684  	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
   685  	mempoolReactor.SetNodeKey(nodeKey)
   686  	// Make Evidence Reactor
   687  	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, logger)
   688  	if err != nil {
   689  		return nil, err
   690  	}
   691  
   692  	// make block executor for consensus and blockchain reactors to execute blocks
   693  	blockExec := sm.NewBlockExecutor(
   694  		stateDB,
   695  		logger.With("module", "state"),
   696  		proxyApp.Consensus(),
   697  		mempool,
   698  		evidencePool,
   699  		sm.BlockExecutorWithMetrics(smMetrics),
   700  	)
   701  	blockExec.SetIsAsyncSaveDB(true)
   702  	if _, ok := txIndexer.(*null.TxIndex); ok {
   703  		blockExec.SetIsNullIndexer(true)
   704  	}
   705  
   706  	// Make BlockchainReactor
   707  	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync, logger)
   708  	if err != nil {
   709  		return nil, errors.Wrap(err, "could not create blockchain reactor")
   710  	}
   711  
   712  	// Make ConsensusReactor
   713  	consensusReactor, consensusState := createConsensusReactor(
   714  		config, state, blockExec, blockStore, mempool, evidencePool,
   715  		privValidator, csMetrics, fastSync, autoFastSync, eventBus, consensusLogger,
   716  	)
   717  
   718  	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
   719  	if err != nil {
   720  		return nil, err
   721  	}
   722  
   723  	// Setup Transport.
   724  	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
   725  
   726  	// Setup Switch.
   727  	p2pLogger := logger.With("module", "p2p")
   728  	sw := createSwitch(
   729  		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
   730  		consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
   731  	)
   732  
   733  	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
   734  	if err != nil {
   735  		return nil, errors.Wrap(err, "could not add peers from persistent_peers field")
   736  	}
   737  
   738  	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   739  	if err != nil {
   740  		return nil, errors.Wrap(err, "could not add peer ids from unconditional_peer_ids field")
   741  	}
   742  
   743  	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
   744  	if err != nil {
   745  		return nil, errors.Wrap(err, "could not create addrbook")
   746  	}
   747  
   748  	// Optionally, start the pex reactor
   749  	//
   750  	// TODO:
   751  	//
   752  	// We need to set Seeds and PersistentPeers on the switch,
   753  	// since it needs to be able to use these (and their DNS names)
   754  	// even if the PEX is off. We can include the DNS name in the NetAddress,
   755  	// but it would still be nice to have a clear list of the current "PersistentPeers"
   756  	// somewhere that we can return with net_info.
   757  	//
   758  	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
   759  	// Note we currently use the addrBook regardless at least for AddOurAddress
   760  	var pexReactor *pex.Reactor
   761  	if config.P2P.PexReactor {
   762  		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
   763  	}
   764  
   765  	if config.ProfListenAddress != "" {
   766  		go func() {
   767  			logger.Error("Profile server", "err", http.ListenAndServe(config.ProfListenAddress, nil))
   768  		}()
   769  	}
   770  
   771  	node := &Node{
   772  		config:        config,
   773  		genesisDoc:    genDoc,
   774  		privValidator: privValidator,
   775  
   776  		transport: transport,
   777  		sw:        sw,
   778  		addrBook:  addrBook,
   779  		nodeInfo:  nodeInfo,
   780  		nodeKey:   nodeKey,
   781  
   782  		stateDB:          stateDB,
   783  		blockStore:       blockStore,
   784  		bcReactor:        bcReactor,
   785  		mempoolReactor:   mempoolReactor,
   786  		mempool:          mempool,
   787  		consensusState:   consensusState,
   788  		consensusReactor: consensusReactor,
   789  		pexReactor:       pexReactor,
   790  		evidencePool:     evidencePool,
   791  		proxyApp:         proxyApp,
   792  		txIndexer:        txIndexer,
   793  		indexerService:   indexerService,
   794  		eventBus:         eventBus,
   795  
   796  		blockExec: blockExec,
   797  	}
   798  	node.BaseService = *service.NewBaseService(logger, "Node", node)
   799  
   800  	for _, option := range options {
   801  		option(node)
   802  	}
   803  
   804  	return node, nil
   805  }
   806  
   807  func NewLRPNode(config *cfg.Config,
   808  	privValidator types.PrivValidator,
   809  	nodeKey *p2p.NodeKey,
   810  	clientCreator proxy.ClientCreator,
   811  	genesisDocProvider GenesisDocProvider,
   812  	dbProvider DBProvider,
   813  	originDir string,
   814  	logger log.Logger,
   815  	options ...Option) (*Node, error) {
   816  
   817  	blockStore, err := initBlockStore(originDir)
   818  	if err != nil {
   819  		return nil, err
   820  	}
   821  
   822  	stateDB, err := initStateDB(config)
   823  	if err != nil {
   824  		return nil, err
   825  	}
   826  
   827  	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
   828  	if err != nil {
   829  		return nil, err
   830  	}
   831  
   832  	global.SetGlobalHeight(state.LastBlockHeight)
   833  
   834  	eventBus, err := createAndStartEventBus(logger)
   835  	if err != nil {
   836  		return nil, err
   837  	}
   838  
   839  	var txIndexer txindex.TxIndexer
   840  	var blockIndexer blockindex.BlockIndexer
   841  	txDB, err := initTxDB(originDir)
   842  	if err != nil {
   843  		return nil, err
   844  	}
   845  	blockIndexDb, err := initBlockIndexDB(originDir)
   846  	if err != nil {
   847  		return nil, err
   848  	}
   849  	txIndexer = kv.NewTxIndex(txDB, kv.IndexAllEvents())
   850  	blockIndexer = blockindexer.New(initBlockEventTxDB(blockIndexDb))
   851  
   852  	indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus)
   853  	indexerService.SetLogger(logger.With("module", "txindex"))
   854  	if err := indexerService.Start(); err != nil {
   855  		return nil, err
   856  	}
   857  
   858  	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
   859  	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
   860  	if err != nil {
   861  		return nil, err
   862  	}
   863  
   864  	consensusLogger := logger.With("module", "consensus")
   865  
   866  	state = sm.LoadState(stateDB)
   867  	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, nil, logger)
   868  	mempoolReactor.SetNodeKey(nodeKey)
   869  
   870  	// Make ConsensusReactor
   871  	consensusReactor, consensusState := createConsensusReactor(
   872  		config, state, nil, blockStore, nil, nil,
   873  		nil, nil, false, false, nil, consensusLogger,
   874  	)
   875  
   876  	nodeInfo, err := makeNodeInfo(config, nodeKey, nil, genDoc, state)
   877  	if err != nil {
   878  		return nil, err
   879  	}
   880  
   881  	node := &Node{
   882  		config:        config,
   883  		genesisDoc:    genDoc,
   884  		privValidator: privValidator,
   885  
   886  		nodeInfo: nodeInfo,
   887  		nodeKey:  nodeKey,
   888  
   889  		stateDB:          stateDB,
   890  		blockStore:       blockStore,
   891  		consensusState:   consensusState,
   892  		consensusReactor: consensusReactor,
   893  		txIndexer:        txIndexer,
   894  		indexerService:   indexerService,
   895  		eventBus:         eventBus,
   896  		proxyApp:         proxyApp,
   897  		mempoolReactor:   mempoolReactor,
   898  		mempool:          mempool,
   899  	}
   900  	node.BaseService = *service.NewBaseService(logger, "Node", node)
   901  
   902  	return node, nil
   903  }
   904  
   905  // OnStart starts the Node. It implements service.Service.
   906  func (n *Node) OnStart() error {
   907  	now := tmtime.Now()
   908  	genTime := n.genesisDoc.GenesisTime
   909  	if genTime.After(now) {
   910  		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
   911  		time.Sleep(genTime.Sub(now))
   912  	}
   913  
   914  	// Add private IDs to addrbook to block those peers being added
   915  	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))
   916  
   917  	// Start the RPC server before the P2P server
   918  	// so we can eg. receive txs for the first block
   919  	if n.config.RPC.ListenAddress != "" {
   920  		listeners, err := n.startRPC()
   921  		if err != nil {
   922  			return err
   923  		}
   924  		n.rpcListeners = listeners
   925  	}
   926  
   927  	if n.config.Instrumentation.Prometheus &&
   928  		n.config.Instrumentation.PrometheusListenAddr != "" {
   929  		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
   930  	}
   931  
   932  	// Start the transport.
   933  	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
   934  	if err != nil {
   935  		return err
   936  	}
   937  	if err := n.transport.Listen(*addr); err != nil {
   938  		return err
   939  	}
   940  
   941  	n.isListening = true
   942  
   943  	// Start the switch (the P2P server).
   944  	err = n.sw.Start()
   945  	if err != nil {
   946  		return err
   947  	}
   948  
   949  	// Always connect to persistent peers
   950  	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
   951  	if err != nil {
   952  		return errors.Wrap(err, "could not dial peers from persistent_peers field")
   953  	}
   954  
   955  	return nil
   956  }
   957  
   958  // OnStop stops the Node. It implements service.Service.
   959  func (n *Node) OnStop() {
   960  	n.BaseService.OnStop()
   961  
   962  	n.Logger.Info("Stopping Node")
   963  
   964  	// first stop the reactors
   965  	n.sw.Stop()
   966  
   967  	// now stop the non-reactor services
   968  	n.blockExec.Stop()
   969  	n.eventBus.Stop()
   970  	n.indexerService.Stop()
   971  
   972  	if err := n.transport.Close(); err != nil {
   973  		n.Logger.Error("Error closing transport", "err", err)
   974  	}
   975  
   976  	n.isListening = false
   977  
   978  	// finally stop the listeners / external services
   979  	for _, l := range n.rpcListeners {
   980  		n.Logger.Info("Closing rpc listener", "listener", l)
   981  		if err := l.Close(); err != nil {
   982  			n.Logger.Error("Error closing listener", "listener", l, "err", err)
   983  		}
   984  	}
   985  
   986  	if pvsc, ok := n.privValidator.(service.Service); ok {
   987  		pvsc.Stop()
   988  	}
   989  
   990  	if n.prometheusSrv != nil {
   991  		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
   992  			// Error from closing listeners, or context timeout:
   993  			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
   994  		}
   995  	}
   996  }
   997  
   998  // ConfigureRPC makes sure RPC has all the objects it needs to operate.
   999  func (n *Node) ConfigureRPC() error {
  1000  	pubKey, err := n.privValidator.GetPubKey()
  1001  	if err != nil {
  1002  		return fmt.Errorf("can't get pubkey: %w", err)
  1003  	}
  1004  	rpccore.SetEnvironment(&rpccore.Environment{
  1005  		ProxyAppQuery: n.proxyApp.Query(),
  1006  
  1007  		StateDB:        n.stateDB,
  1008  		BlockStore:     n.blockStore,
  1009  		EvidencePool:   n.evidencePool,
  1010  		ConsensusState: n.consensusState,
  1011  		P2PPeers:       n.sw,
  1012  		P2PTransport:   n,
  1013  
  1014  		PubKey:           pubKey,
  1015  		GenDoc:           n.genesisDoc,
  1016  		TxIndexer:        n.txIndexer,
  1017  		BlockIndexer:     n.indexerService.GetBlockIndexer(),
  1018  		ConsensusReactor: n.consensusReactor,
  1019  		EventBus:         n.eventBus,
  1020  		Mempool:          n.mempool,
  1021  
  1022  		Logger: n.Logger.With("module", "rpc"),
  1023  
  1024  		Config: *n.config.RPC,
  1025  	})
  1026  	return nil
  1027  }
  1028  
  1029  func (n *Node) startRPC() ([]net.Listener, error) {
  1030  	err := n.ConfigureRPC()
  1031  	if err != nil {
  1032  		return nil, err
  1033  	}
  1034  
  1035  	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
  1036  	coreCodec := amino.NewCodec()
  1037  	ctypes.RegisterAmino(coreCodec)
  1038  
  1039  	if n.config.RPC.Unsafe {
  1040  		rpccore.AddUnsafeRoutes()
  1041  	}
  1042  
  1043  	config := rpcserver.DefaultConfig()
  1044  	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
  1045  	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
  1046  	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
  1047  	// If necessary adjust global WriteTimeout to ensure it's greater than
  1048  	// TimeoutBroadcastTxCommit.
  1049  	// See https://github.com/okex/exchain/libs/tendermint/issues/3435
  1050  	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
  1051  		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
  1052  	}
  1053  
  1054  	// we may expose the rpc over both a unix and tcp socket
  1055  	listeners := make([]net.Listener, len(listenAddrs))
  1056  	for i, listenAddr := range listenAddrs {
  1057  		mux := http.NewServeMux()
  1058  		rpcLogger := n.Logger.With("module", "rpc-server")
  1059  		wmLogger := rpcLogger.With("protocol", "websocket")
  1060  		wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec,
  1061  			rpcserver.OnDisconnect(func(remoteAddr string) {
  1062  				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
  1063  				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
  1064  					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
  1065  				}
  1066  			}),
  1067  			rpcserver.ReadLimit(config.MaxBodyBytes),
  1068  		)
  1069  		wm.SetLogger(wmLogger)
  1070  		mux.HandleFunc("/websocket", wm.WebsocketHandler)
  1071  		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
  1072  		listener, err := rpcserver.Listen(
  1073  			listenAddr,
  1074  			config,
  1075  		)
  1076  		if err != nil {
  1077  			return nil, err
  1078  		}
  1079  
  1080  		var rootHandler http.Handler = mux
  1081  		if n.config.RPC.IsCorsEnabled() {
  1082  			corsMiddleware := cors.New(cors.Options{
  1083  				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
  1084  				AllowedMethods: n.config.RPC.CORSAllowedMethods,
  1085  				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
  1086  			})
  1087  			rootHandler = corsMiddleware.Handler(mux)
  1088  		}
  1089  		if n.config.RPC.IsTLSEnabled() {
  1090  			go rpcserver.ServeTLS(
  1091  				listener,
  1092  				rootHandler,
  1093  				n.config.RPC.CertFile(),
  1094  				n.config.RPC.KeyFile(),
  1095  				rpcLogger,
  1096  				config,
  1097  			)
  1098  		} else {
  1099  			go rpcserver.Serve(
  1100  				listener,
  1101  				rootHandler,
  1102  				rpcLogger,
  1103  				config,
  1104  			)
  1105  		}
  1106  
  1107  		listeners[i] = listener
  1108  	}
  1109  
  1110  	// we expose a simplified api over grpc for convenience to app devs
  1111  	grpcListenAddr := n.config.RPC.GRPCListenAddress
  1112  	if grpcListenAddr != "" {
  1113  		config := rpcserver.DefaultConfig()
  1114  		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
  1115  		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
  1116  		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
  1117  		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
  1118  		// If necessary adjust global WriteTimeout to ensure it's greater than
  1119  		// TimeoutBroadcastTxCommit.
  1120  		// See https://github.com/okex/exchain/libs/tendermint/issues/3435
  1121  		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
  1122  			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
  1123  		}
  1124  		listener, err := rpcserver.Listen(grpcListenAddr, config)
  1125  		if err != nil {
  1126  			return nil, err
  1127  		}
  1128  		go grpccore.StartGRPCServer(listener)
  1129  		listeners = append(listeners, listener)
  1130  	}
  1131  
  1132  	return listeners, nil
  1133  }
  1134  
  1135  // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
  1136  // collectors on addr.
  1137  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1138  	srv := &http.Server{
  1139  		Addr: addr,
  1140  		Handler: promhttp.InstrumentMetricHandler(
  1141  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1142  				prometheus.DefaultGatherer,
  1143  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1144  			),
  1145  		),
  1146  	}
  1147  	go func() {
  1148  		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
  1149  			// Error starting or closing listener:
  1150  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1151  		}
  1152  	}()
  1153  	return srv
  1154  }
  1155  
  1156  // Switch returns the Node's Switch.
  1157  func (n *Node) Switch() *p2p.Switch {
  1158  	return n.sw
  1159  }
  1160  
  1161  // BlockStore returns the Node's BlockStore.
  1162  func (n *Node) BlockStore() *store.BlockStore {
  1163  	return n.blockStore
  1164  }
  1165  
  1166  // ConsensusState returns the Node's ConsensusState.
  1167  func (n *Node) ConsensusState() *cs.State {
  1168  	return n.consensusState
  1169  }
  1170  
  1171  // ConsensusReactor returns the Node's ConsensusReactor.
  1172  func (n *Node) ConsensusReactor() *cs.Reactor {
  1173  	return n.consensusReactor
  1174  }
  1175  
  1176  // MempoolReactor returns the Node's mempool reactor.
  1177  func (n *Node) MempoolReactor() *mempl.Reactor {
  1178  	return n.mempoolReactor
  1179  }
  1180  
  1181  // Mempool returns the Node's mempool.
  1182  func (n *Node) Mempool() mempl.Mempool {
  1183  	return n.mempool
  1184  }
  1185  
  1186  // PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
  1187  func (n *Node) PEXReactor() *pex.Reactor {
  1188  	return n.pexReactor
  1189  }
  1190  
  1191  // EvidencePool returns the Node's EvidencePool.
  1192  func (n *Node) EvidencePool() *evidence.Pool {
  1193  	return n.evidencePool
  1194  }
  1195  
  1196  // EventBus returns the Node's EventBus.
  1197  func (n *Node) EventBus() *types.EventBus {
  1198  	return n.eventBus
  1199  }
  1200  
  1201  // PrivValidator returns the Node's PrivValidator.
  1202  // XXX: for convenience only!
  1203  func (n *Node) PrivValidator() types.PrivValidator {
  1204  	return n.privValidator
  1205  }
  1206  
  1207  // GenesisDoc returns the Node's GenesisDoc.
  1208  func (n *Node) GenesisDoc() *types.GenesisDoc {
  1209  	return n.genesisDoc
  1210  }
  1211  
  1212  // ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
  1213  func (n *Node) ProxyApp() proxy.AppConns {
  1214  	return n.proxyApp
  1215  }
  1216  
  1217  // Config returns the Node's config.
  1218  func (n *Node) Config() *cfg.Config {
  1219  	return n.config
  1220  }
  1221  
  1222  //------------------------------------------------------------------------------
  1223  
  1224  func (n *Node) Listeners() []string {
  1225  	return []string{
  1226  		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
  1227  	}
  1228  }
  1229  
  1230  func (n *Node) IsListening() bool {
  1231  	return n.isListening
  1232  }
  1233  
  1234  // NodeInfo returns the Node's Info from the Switch.
  1235  func (n *Node) NodeInfo() p2p.NodeInfo {
  1236  	return n.nodeInfo
  1237  }
  1238  
  1239  func (n *Node) StateDB() dbm.DB {
  1240  	return n.stateDB
  1241  }
  1242  
  1243  func makeNodeInfo(
  1244  	config *cfg.Config,
  1245  	nodeKey *p2p.NodeKey,
  1246  	txIndexer txindex.TxIndexer,
  1247  	genDoc *types.GenesisDoc,
  1248  	state sm.State,
  1249  ) (p2p.NodeInfo, error) {
  1250  	txIndexerStatus := "on"
  1251  	if _, ok := txIndexer.(*null.TxIndex); ok {
  1252  		txIndexerStatus = "off"
  1253  	}
  1254  
  1255  	var bcChannel byte
  1256  	switch config.FastSync.Version {
  1257  	case "v0":
  1258  		bcChannel = bcv0.BlockchainChannel
  1259  	case "v1":
  1260  		bcChannel = bcv1.BlockchainChannel
  1261  	case "v2":
  1262  		bcChannel = bcv2.BlockchainChannel
  1263  	default:
  1264  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
  1265  	}
  1266  
  1267  	nodeInfo := p2p.DefaultNodeInfo{
  1268  		ProtocolVersion: p2p.NewProtocolVersion(
  1269  			version.P2PProtocol, // global
  1270  			state.Version.Consensus.Block,
  1271  			state.Version.Consensus.App,
  1272  		),
  1273  		DefaultNodeID: nodeKey.ID(),
  1274  		Network:       genDoc.ChainID,
  1275  		Version:       version.TMCoreSemVer,
  1276  		Channels: []byte{
  1277  			bcChannel,
  1278  			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, cs.ViewChangeChannel,
  1279  			mempl.MempoolChannel,
  1280  			evidence.EvidenceChannel,
  1281  		},
  1282  		Moniker: config.Moniker,
  1283  		Other: p2p.DefaultNodeInfoOther{
  1284  			TxIndex:    txIndexerStatus,
  1285  			RPCAddress: config.RPC.ListenAddress,
  1286  		},
  1287  	}
  1288  
  1289  	if config.P2P.PexReactor {
  1290  		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
  1291  	}
  1292  
  1293  	lAddr := config.P2P.ExternalAddress
  1294  
  1295  	if lAddr == "" {
  1296  		lAddr = config.P2P.ListenAddress
  1297  	}
  1298  
  1299  	nodeInfo.ListenAddr = lAddr
  1300  
  1301  	err := nodeInfo.Validate()
  1302  	return nodeInfo, err
  1303  }
  1304  
  1305  //------------------------------------------------------------------------------
  1306  
  1307  var (
  1308  	genesisDocKey = []byte("genesisDoc")
  1309  )
  1310  
  1311  // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
  1312  // database, or creates one using the given genesisDocProvider and persists the
  1313  // result to the database. On success this also returns the genesis doc loaded
  1314  // through the given provider.
  1315  func LoadStateFromDBOrGenesisDocProvider(
  1316  	stateDB dbm.DB,
  1317  	genesisDocProvider GenesisDocProvider,
  1318  ) (sm.State, *types.GenesisDoc, error) {
  1319  	// Get genesis doc
  1320  	genDoc, err := loadGenesisDoc(stateDB)
  1321  	if err != nil {
  1322  		genDoc, err = genesisDocProvider()
  1323  		if err != nil {
  1324  			return sm.State{}, nil, err
  1325  		}
  1326  		// save genesis doc to prevent a certain class of user errors (e.g. when it
  1327  		// was changed, accidentally or not). Also good for audit trail.
  1328  		saveGenesisDoc(stateDB, genDoc)
  1329  	}
  1330  	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
  1331  	if err != nil {
  1332  		return sm.State{}, nil, err
  1333  	}
  1334  	return state, genDoc, nil
  1335  }
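        // Editor's illustrative sketch, not part of the original source: pairing
        // LoadStateFromDBOrGenesisDocProvider with the default providers defined
        // earlier in this file. The name loadStateForConfig is hypothetical.
        func loadStateForConfig(config *cfg.Config) (sm.State, *types.GenesisDoc, error) {
        	stateDB, err := DefaultDBProvider(&DBContext{ID: "state", Config: config})
        	if err != nil {
        		return sm.State{}, nil, err
        	}
        	return LoadStateFromDBOrGenesisDocProvider(stateDB, DefaultGenesisDocProviderFunc(config))
        }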
  1336  
  1337  // loadGenesisDoc reads the genesis doc from the DB; it panics if the stored bytes cannot be unmarshaled.
  1338  func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
  1339  	b, err := db.Get(genesisDocKey)
  1340  	if err != nil {
  1341  		panic(err)
  1342  	}
  1343  	if len(b) == 0 {
  1344  		return nil, errors.New("genesis doc not found")
  1345  	}
  1346  	var genDoc *types.GenesisDoc
  1347  	err = cdc.UnmarshalJSON(b, &genDoc)
  1348  	if err != nil {
  1349  		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
  1350  	}
  1351  	return genDoc, nil
  1352  }
  1353  
  1354  // saveGenesisDoc persists the given genesis doc to the DB; it panics if marshaling fails.
  1355  func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
  1356  	b, err := cdc.MarshalJSON(genDoc)
  1357  	if err != nil {
  1358  		panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
  1359  	}
  1360  	db.SetSync(genesisDocKey, b)
  1361  }
  1362  
  1363  func createAndStartPrivValidatorSocketClient(
  1364  	listenAddr string,
  1365  	logger log.Logger,
  1366  ) (types.PrivValidator, error) {
  1367  	pve, err := privval.NewSignerListener(listenAddr, logger)
  1368  	if err != nil {
  1369  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1370  	}
  1371  
  1372  	pvsc, err := privval.NewSignerClient(pve)
  1373  	if err != nil {
  1374  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1375  	}
  1376  
  1377  	// Fetch the pubkey from the private validator once up front to verify the connection.
  1378  	_, err = pvsc.GetPubKey()
  1379  	if err != nil {
  1380  		return nil, fmt.Errorf("can't get pubkey: %w", err)
  1381  	}
  1382  
  1383  	const (
  1384  		retries = 50 // 50 * 100ms = 5s total
  1385  		timeout = 100 * time.Millisecond
  1386  	)
  1387  	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
  1388  
  1389  	return pvscWithRetries, nil
  1390  }
  1391  
  1392  // splitAndTrimEmpty slices s into all substrings separated by sep, trims each
  1393  // substring of all leading and trailing Unicode code points contained in
  1394  // cutset, and returns only the substrings that are non-empty after trimming.
  1395  // The split itself is equivalent to strings.Split; if sep is empty, it splits
  1396  // after each UTF-8 sequence.
  1397  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1398  	if s == "" {
  1399  		return []string{}
  1400  	}
  1401  
  1402  	spl := strings.Split(s, sep)
  1403  	nonEmptyStrings := make([]string, 0, len(spl))
  1404  	for i := 0; i < len(spl); i++ {
  1405  		element := strings.Trim(spl[i], cutset)
  1406  		if element != "" {
  1407  			nonEmptyStrings = append(nonEmptyStrings, element)
  1408  		}
  1409  	}
  1410  	return nonEmptyStrings
  1411  }
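        // Editor's illustrative sketch, not part of the original source: how the helper
        // above behaves on the comma-separated values used throughout this file. The
        // name splitAndTrimEmptyExample is hypothetical.
        func splitAndTrimEmptyExample() []string {
        	// Returns []string{"id1@1.2.3.4:26656", "id2@5.6.7.8:26656"}: entries are
        	// split on ",", trimmed of spaces, and empty entries are dropped.
        	return splitAndTrimEmpty(" id1@1.2.3.4:26656, ,id2@5.6.7.8:26656 ", ",", " ")
        }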
  1412  
  1413  // setDefaultSeeds fills in the default mainnet or testnet seeds when seeds is empty.
  1414  func setDefaultSeeds(seeds string) string {
  1415  	if seeds == "" {
  1416  		if types.IsMainNet() {
  1417  			seeds = p2p.MAIN_NET_SEEDS
  1418  		}
  1419  
  1420  		if types.IsTestNet() {
  1421  			seeds = p2p.TEST_NET_SEEDS
  1422  		}
  1423  	}
  1424  	return seeds
  1425  }