github.com/klaytn/klaytn@v1.12.1/node/sc/subbridge.go (about)

     1  // Modifications Copyright 2019 The klaytn Authors
     2  // Copyright 2014 The go-ethereum Authors
     3  // This file is part of go-ethereum.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from eth/backend.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package sc
    22  
    23  import (
    24  	"context"
    25  	"errors"
    26  	"fmt"
    27  	"io"
    28  	"math/big"
    29  	"net"
    30  	"path"
    31  	"sync"
    32  	"time"
    33  
    34  	"github.com/klaytn/klaytn/accounts"
    35  	"github.com/klaytn/klaytn/accounts/abi/bind"
    36  	"github.com/klaytn/klaytn/accounts/abi/bind/backends"
    37  	"github.com/klaytn/klaytn/api"
    38  	"github.com/klaytn/klaytn/blockchain"
    39  	"github.com/klaytn/klaytn/blockchain/types"
    40  	"github.com/klaytn/klaytn/common"
    41  	"github.com/klaytn/klaytn/crypto"
    42  	"github.com/klaytn/klaytn/event"
    43  	"github.com/klaytn/klaytn/networks/p2p"
    44  	"github.com/klaytn/klaytn/networks/p2p/discover"
    45  	"github.com/klaytn/klaytn/networks/rpc"
    46  	"github.com/klaytn/klaytn/node"
    47  	"github.com/klaytn/klaytn/node/cn/filters"
    48  	"github.com/klaytn/klaytn/node/sc/bridgepool"
    49  	"github.com/klaytn/klaytn/node/sc/kas"
    50  	"github.com/klaytn/klaytn/params"
    51  	"github.com/klaytn/klaytn/storage/database"
    52  	"github.com/klaytn/klaytn/work"
    53  )
    54  
const (
	forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available

	// Buffer sizes of the value-transfer event channels; sized generously so
	// event emitters do not block on slow consumers.
	// NOTE(review): "VTevan" looks like a typo for "VTevChan" — renaming would
	// touch every user of these constants, so it is only flagged here.
	chanReqVTevanSize    = 10000
	chanHandleVTevanSize = 10000

	resetBridgeCycle   = 3 * time.Second // Retry interval for re-subscribing bridge events (see resetBridgeLoop)
	restoreBridgeCycle = 3 * time.Second // Retry interval for restoring bridges at startup (see restoreBridgeLoop)
)
    64  
// RemoteBackendInterface wraps methods for remote backend, i.e. a backend
// reached over the bridge rather than the local node.
type RemoteBackendInterface interface {
	bind.ContractBackend
	// TransactionReceiptRpcOutput returns a receipt in RPC output (map) form.
	TransactionReceiptRpcOutput(ctx context.Context, txHash common.Hash) (map[string]interface{}, error)
	// BalanceAt returns the balance of account at the given block number.
	BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error)
}
    71  
// Backend wraps all methods for local and remote backend.
type Backend interface {
	bind.ContractBackend
	// CurrentBlockNumber returns the latest block number of the backing chain.
	CurrentBlockNumber(context.Context) (uint64, error)
	// BalanceAt returns the balance of account at the given block number.
	BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error)
}
    78  
// SubBridgeInfo represents a short summary of the ServiceChain sub-protocol
// metadata known about the host peer. Returned by SubBridge.NodeInfo.
type SubBridgeInfo struct {
	Network uint64              `json:"network"` // Klaytn network ID
	Genesis common.Hash         `json:"genesis"` // SHA3 hash of the host's genesis block
	Config  *params.ChainConfig `json:"config"`  // Chain configuration for the fork rules
	Head    common.Hash         `json:"head"`    // SHA3 hash of the host's best owned block
	ChainID *big.Int            `json:"chainid"` // ChainID
}
    88  
//go:generate mockgen -destination=bridgeTxPool_mock_test.go -package=sc github.com/klaytn/klaytn/node/sc BridgeTxPool

// BridgeTxPool is the interface of the pool holding bridge transactions
// destined for the parent chain (implemented by bridgepool.BridgeTxPool).
type BridgeTxPool interface {
	// GetMaxTxNonce returns the highest nonce among pooled txs sent from `from`.
	GetMaxTxNonce(from *common.Address) uint64
	// AddLocal inserts a locally created transaction into the pool.
	AddLocal(tx *types.Transaction) error
	// Stats reports pool statistics (presumably the pending tx count — confirm in bridgepool).
	Stats() int
	// Pending returns all pending transactions grouped by sender.
	Pending() map[common.Address]types.Transactions
	// Get looks up a pooled transaction by hash.
	Get(hash common.Hash) *types.Transaction
	// RemoveTx removes the given transaction from the pool.
	RemoveTx(tx *types.Transaction) error
	// PendingTxHashesByAddress returns up to `limit` pending tx hashes for `from`.
	PendingTxHashesByAddress(from *common.Address, limit int) []common.Hash
	// PendingTxsByAddress returns up to `limit` pending txs for `from`.
	PendingTxsByAddress(from *common.Address, limit int) types.Transactions
	// Stop terminates the pool.
	Stop()
}
   101  
// SubBridge implements the Klaytn service chain sub-bridge node service.
type SubBridge struct {
	config *SCConfig

	// DB interfaces
	chainDB database.DBManager // Block chain database

	eventMux       *event.TypeMux
	accountManager *accounts.Manager

	networkId     uint64
	netRPCService *api.PublicNetAPI

	lock sync.RWMutex // Protects the variadic fields (e.g. gas price and coinbase)

	bridgeServer p2p.Server // dedicated p2p server connecting to the main-bridge
	ctx          *node.ServiceContext
	maxPeers     int

	APIBackend *SubBridgeAPI

	// channels for fetcher, syncer, txsyncLoop
	newPeerCh    chan BridgePeer
	addPeerCh    chan struct{}
	noMorePeers  chan struct{}
	removePeerCh chan struct{}
	quitSync     chan struct{} // closed in Stop() to terminate the background loops

	// wait group is used for graceful shutdowns during downloading and processing
	pmwg sync.WaitGroup

	blockchain   *blockchain.BlockChain
	txPool       *blockchain.TxPool
	bridgeTxPool BridgeTxPool

	// chain event
	chainCh  chan blockchain.ChainEvent
	chainSub event.Subscription
	logsCh   chan []*types.Log
	logsSub  event.Subscription

	// If this channel can't be read immediately, it can lock service chain tx pool.
	// Commented out because for now, it doesn't need.
	// txCh         chan blockchain.NewTxsEvent
	// txSub        event.Subscription

	peers        *bridgePeerSet
	handler      *SubBridgeHandler
	eventhandler *ChildChainEventHandler

	// bridgemanager for value exchange
	localBackend  Backend
	remoteBackend Backend
	bridgeManager *BridgeManager

	// value-transfer event channels and their subscriptions (consumed in loop())
	chanReqVTev        chan RequestValueTransferEvent
	chanReqVTencodedEv chan RequestValueTransferEncodedEvent
	reqVTevSub         event.Subscription
	reqVTencodedEvSub  event.Subscription
	chanHandleVTev     chan *HandleValueTransferEvent
	handleVTevSub      event.Subscription

	bridgeAccounts *BridgeAccounts

	// bootFail is set when SetComponents fails; Start() then refuses to run.
	bootFail bool

	// service on/off
	onAnchoringTx bool

	// RPC pipe to the attached node (see SetRPCConn / SendRPCData)
	rpcConn   net.Conn
	rpcSendCh chan []byte

	// KAS Anchor
	kasAnchor *kas.Anchor
}
   177  
// NewSubBridge creates a new SubBridge object, including its database,
// bridge tx pool, bridge accounts and the sub-bridge/event handlers.
// The returned service still needs SetComponents and Start to become active.
func NewSubBridge(ctx *node.ServiceContext, config *SCConfig) (*SubBridge, error) {
	chainDB := CreateDB(ctx, config, "subbridgedata")

	sb := &SubBridge{
		config:         config,
		chainDB:        chainDB,
		peers:          newBridgePeerSet(),
		newPeerCh:      make(chan BridgePeer),
		addPeerCh:      make(chan struct{}),
		removePeerCh:   make(chan struct{}),
		noMorePeers:    make(chan struct{}),
		eventMux:       ctx.EventMux,
		accountManager: ctx.AccountManager,
		networkId:      config.NetworkId,
		ctx:            ctx,
		chainCh:        make(chan blockchain.ChainEvent, chainEventChanSize),
		logsCh:         make(chan []*types.Log, chainLogChanSize),
		// txCh:            make(chan blockchain.NewTxsEvent, transactionChanSize),
		chanReqVTev:        make(chan RequestValueTransferEvent, chanReqVTevanSize),
		chanReqVTencodedEv: make(chan RequestValueTransferEncodedEvent, chanReqVTevanSize),
		chanHandleVTev:     make(chan *HandleValueTransferEvent, chanHandleVTevanSize),
		quitSync:           make(chan struct{}),
		maxPeers:           config.MaxPeer,
		onAnchoringTx:      config.Anchoring,
		bootFail:           false,
		rpcSendCh:          make(chan []byte),
	}
	// TODO-Klaytn change static config to user define config
	bridgetxConfig := bridgepool.BridgeTxPoolConfig{
		ParentChainID: new(big.Int).SetUint64(config.ParentChainID),
		Journal:       path.Join(config.DataDir, "bridge_transactions.rlp"),
		Rejournal:     time.Hour,
		GlobalQueue:   8192,
	}

	logger.Info("Initialising Klaytn-Bridge protocol", "network", config.NetworkId)
	sb.APIBackend = &SubBridgeAPI{sb}

	sb.bridgeTxPool = bridgepool.NewBridgeTxPool(bridgetxConfig)

	var err error
	// Operator accounts that sign bridge transactions on the parent and child chains.
	sb.bridgeAccounts, err = NewBridgeAccounts(sb.accountManager, config.DataDir, chainDB, sb.config.ServiceChainParentOperatorGasLimit, sb.config.ServiceChainChildOperatorGasLimit)
	if err != nil {
		return nil, err
	}
	sb.handler, err = NewSubBridgeHandler(sb)
	if err != nil {
		return nil, err
	}
	sb.eventhandler, err = NewChildChainEventHandler(sb, sb.handler)
	if err != nil {
		return nil, err
	}
	// The parent operator signs with the parent chain's chain ID.
	sb.bridgeAccounts.pAccount.SetChainID(new(big.Int).SetUint64(config.ParentChainID))

	return sb, nil
}
   237  
   238  func (sb *SubBridge) SetRPCConn(conn net.Conn) {
   239  	sb.rpcConn = conn
   240  
   241  	go func() {
   242  		for {
   243  			data := make([]byte, rpcBufferSize)
   244  			rlen, err := sb.rpcConn.Read(data)
   245  			if err != nil {
   246  				if err == io.EOF {
   247  					logger.Trace("EOF from the rpc pipe")
   248  					time.Sleep(100 * time.Millisecond)
   249  					continue
   250  				} else {
   251  					// If no one closes the pipe, this situation should not happen.
   252  					logger.Error("failed to read from the rpc pipe", "err", err, "rlen", rlen)
   253  					return
   254  				}
   255  			}
   256  			sb.rpcSendCh <- data[:rlen]
   257  		}
   258  	}()
   259  }
   260  
   261  func (sb *SubBridge) SendRPCData(data []byte) error {
   262  	peers := sb.BridgePeerSet().peers
   263  	logger.Trace("send rpc message from the subbridge", "len", len(data), "peers", len(peers))
   264  	for _, peer := range peers {
   265  		err := peer.SendRequestRPC(data)
   266  		if err != nil {
   267  			logger.Error("SendRPCData Error", "err", err)
   268  		}
   269  		return err
   270  	}
   271  	logger.Trace("send rpc message from the subbridge, done")
   272  
   273  	return nil
   274  }
   275  
// implement PeerSetManager
// BridgePeerSet returns the set of currently connected bridge peers.
func (sb *SubBridge) BridgePeerSet() *bridgePeerSet {
	return sb.peers
}

// GetBridgeTxPool returns the pool of bridge (parent-chain) transactions.
func (sb *SubBridge) GetBridgeTxPool() BridgeTxPool {
	return sb.bridgeTxPool
}

// GetAnchoringTx reports whether anchoring transactions are currently enabled.
func (sb *SubBridge) GetAnchoringTx() bool {
	return sb.onAnchoringTx
}
   288  
   289  func (sb *SubBridge) SetAnchoringTx(flag bool) bool {
   290  	if sb.onAnchoringTx != flag && flag {
   291  		sb.handler.txCountStartingBlockNumber = 0
   292  	}
   293  	sb.onAnchoringTx = flag
   294  	return sb.GetAnchoringTx()
   295  }
   296  
   297  // APIs returns the collection of RPC services the ethereum package offers.
   298  // NOTE, some of these services probably need to be moved to somewhere else.
   299  func (sb *SubBridge) APIs() []rpc.API {
   300  	// Append all the local APIs and return
   301  	return []rpc.API{
   302  		{
   303  			Namespace: "subbridge",
   304  			Version:   "1.0",
   305  			Service:   sb.APIBackend,
   306  			Public:    true,
   307  		},
   308  		{
   309  			Namespace: "subbridge",
   310  			Version:   "1.0",
   311  			Service:   sb.netRPCService,
   312  			Public:    true,
   313  		},
   314  	}
   315  }
   316  
// Trivial accessors for node-level components and network identity.
func (sb *SubBridge) AccountManager() *accounts.Manager { return sb.accountManager }
func (sb *SubBridge) EventMux() *event.TypeMux          { return sb.eventMux }
func (sb *SubBridge) ChainDB() database.DBManager       { return sb.chainDB }
func (sb *SubBridge) IsListening() bool                 { return true } // Always listening
func (sb *SubBridge) ProtocolVersion() int              { return int(sb.SCProtocol().Versions[0]) }
func (sb *SubBridge) NetVersion() uint64                { return sb.networkId }

// Components returns nil: the subbridge provides no components to other
// services and only consumes them via SetComponents.
func (sb *SubBridge) Components() []interface{} {
	return nil
}
   327  
// SetComponents wires the shared node components (blockchain, txpool, miner)
// into the SubBridge and completes the initialization that depends on them:
// KAS anchoring, local/remote backends, the BridgeManager, the value-transfer
// event subscriptions, and the background loops. On any failure it sets
// bootFail so that Start() refuses to run.
func (sb *SubBridge) SetComponents(components []interface{}) {
	for _, component := range components {
		switch v := component.(type) {
		case *blockchain.BlockChain:
			sb.blockchain = v

			// Configure KAS anchoring from the service-chain config.
			kasConfig := &kas.KASConfig{
				Url:            sb.config.KASAnchorUrl,
				XChainId:       sb.config.KASXChainId,
				User:           sb.config.KASAccessKey,
				Pwd:            sb.config.KASSecretKey,
				Operator:       common.HexToAddress(sb.config.KASAnchorOperator),
				Anchor:         sb.config.KASAnchor,
				AnchorPeriod:   sb.config.KASAnchorPeriod,
				RequestTimeout: sb.config.KASAnchorRequestTimeout,
			}
			sb.kasAnchor = kas.NewKASAnchor(kasConfig, sb.chainDB, v)

			// event from core-service
			sb.chainSub = sb.blockchain.SubscribeChainEvent(sb.chainCh)
			sb.logsSub = sb.blockchain.SubscribeLogsEvent(sb.logsCh)
			// The child operator signs with the local (child) chain's chain ID.
			sb.bridgeAccounts.cAccount.SetChainID(v.Config().ChainID)
		case *blockchain.TxPool:
			sb.txPool = v
			// event from core-service
			// sb.txSub = sb.txPool.SubscribeNewTxsEvent(sb.txCh)
		// TODO-Klaytn if need pending block, should use miner
		case *work.Miner:
		}
	}

	var err error
	if sb.config.EnabledSubBridge {
		// The remote backend talks to the parent chain through the bridge peers.
		sb.remoteBackend, err = NewRemoteBackend(sb)
		if err != nil {
			logger.Error("fail to initialize RemoteBackend", "err", err)
			sb.bootFail = true
			return
		}
	}

	// The local backend executes contract calls against this node's own chain.
	es := filters.NewEventSystem(sb.eventMux, &filterLocalBackend{sb}, false)
	sb.localBackend = backends.NewBlockchainContractBackend(sb.blockchain, sb.txPool, es)

	sb.bridgeManager, err = NewBridgeManager(sb)
	if err != nil {
		logger.Error("fail to initialize BridgeManager", "err", err)
		sb.bootFail = true
		return
	}
	// Subscribe to the bridge value-transfer events consumed by loop().
	sb.reqVTevSub = sb.bridgeManager.SubscribeReqVTev(sb.chanReqVTev)
	sb.reqVTencodedEvSub = sb.bridgeManager.SubscribeReqVTencodedEv(sb.chanReqVTencodedEv)
	sb.handleVTevSub = sb.bridgeManager.SubscribeHandleVTev(sb.chanHandleVTev)

	sb.pmwg.Add(1)
	go sb.restoreBridgeLoop()

	sb.pmwg.Add(1)
	go sb.resetBridgeLoop()

	// Seed the child operator's nonce from the local tx pool.
	sb.bridgeAccounts.cAccount.SetNonce(sb.txPool.GetPendingNonce(sb.bridgeAccounts.cAccount.address))

	sb.pmwg.Add(1)
	go sb.loop()
}
   393  
// Protocols implements node.Service, returning all the currently configured
// network protocols to start. The subbridge registers its protocols on its
// own dedicated p2p server in Start() instead, so this returns an empty list.
func (sb *SubBridge) Protocols() []p2p.Protocol {
	return []p2p.Protocol{}
}
   399  
   400  func (sb *SubBridge) SCProtocol() SCProtocol {
   401  	return SCProtocol{
   402  		Name:     SCProtocolName,
   403  		Versions: SCProtocolVersion,
   404  		Lengths:  SCProtocolLength,
   405  	}
   406  }
   407  
   408  // NodeInfo retrieves some protocol metadata about the running host node.
   409  func (sb *SubBridge) NodeInfo() *SubBridgeInfo {
   410  	currentBlock := sb.blockchain.CurrentBlock()
   411  	return &SubBridgeInfo{
   412  		Network: sb.networkId,
   413  		Genesis: sb.blockchain.Genesis().Hash(),
   414  		Config:  sb.blockchain.Config(),
   415  		Head:    currentBlock.Hash(),
   416  		ChainID: sb.blockchain.Config().ChainID,
   417  	}
   418  }
   419  
// getChainID returns the current (child) chain id from the chain config.
func (sb *SubBridge) getChainID() *big.Int {
	return sb.blockchain.Config().ChainID
}
   424  
// Start implements node.Service, starting all internal goroutines needed by the
// Klaytn protocol implementation. It builds a dedicated p2p server (outbound
// only, no discovery) that dials the configured main-bridges, registers the
// service-chain sub-protocols on it, and launches the syncer loop.
// The `srvr` argument (the node's own server) is intentionally unused.
func (sb *SubBridge) Start(srvr p2p.Server) error {
	// Refuse to start if SetComponents failed earlier.
	if sb.bootFail {
		return errors.New("subBridge node fail to start")
	}

	serverConfig := p2p.Config{}
	serverConfig.PrivateKey = sb.ctx.NodeKey()
	serverConfig.Name = sb.ctx.NodeType().String()
	serverConfig.Logger = logger
	// Outbound-only: no listener, no discovery, single channel.
	serverConfig.NoListen = true
	serverConfig.MaxPhysicalConnections = sb.maxPeers
	serverConfig.NoDiscovery = true
	serverConfig.EnableMultiChannelServer = false

	// connect to mainbridge as outbound
	serverConfig.StaticNodes = sb.config.MainBridges()

	p2pServer := p2p.NewServer(serverConfig)

	sb.bridgeServer = p2pServer

	scprotocols := make([]p2p.Protocol, 0, len(sb.SCProtocol().Versions))
	for i, version := range sb.SCProtocol().Versions {
		// Compatible; initialise the sub-protocol
		version := version // capture per-iteration copy for the closures below
		scprotocols = append(scprotocols, p2p.Protocol{
			Name:    sb.SCProtocol().Name,
			Version: version,
			Length:  sb.SCProtocol().Lengths[i],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				// Wrap the raw peer, derive its address from its public key,
				// then hand it to the sync machinery and the message handler.
				peer := sb.newPeer(int(version), p, rw)
				pubKey, _ := p.ID().Pubkey()
				addr := crypto.PubkeyToAddress(*pubKey)
				peer.SetAddr(addr)
				select {
				case sb.newPeerCh <- peer:
					return sb.handle(peer)
				case <-sb.quitSync:
					return p2p.DiscQuitting
				}
			},
			NodeInfo: func() interface{} {
				return sb.NodeInfo()
			},
			PeerInfo: func(id discover.NodeID) interface{} {
				if p := sb.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
					return p.Info()
				}
				return nil
			},
		})
	}
	sb.bridgeServer.AddProtocols(scprotocols)

	if err := p2pServer.Start(); err != nil {
		return errors.New("fail to bridgeserver start")
	}

	// Start the RPC service
	sb.netRPCService = api.NewPublicNetAPI(sb.bridgeServer, sb.NetVersion())

	// Figure out a max peers count based on the server limits
	//sb.maxPeers = sb.bridgeServer.MaxPhysicalConnections()
	//validator := func(header *types.Header) error {
	//	return nil
	//}
	//heighter := func() uint64 {
	//	return sb.blockchain.CurrentBlock().NumberU64()
	//}
	//inserter := func(blocks types.Blocks) (int, error) {
	//	return 0, nil
	//}
	//sb.fetcher = fetcher.New(sb.GetBlockByHash, validator, sb.BroadcastBlock, heighter, inserter, sb.removePeer)

	go sb.syncer()

	return nil
}
   505  
// newPeer wraps a raw p2p peer and its read-writer into a BridgePeer whose
// message writes are metered.
func (sb *SubBridge) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) BridgePeer {
	return newBridgePeer(pv, p, newMeteredMsgWriter(rw))
}
   509  
   510  func (sb *SubBridge) handle(p BridgePeer) error {
   511  	// Ignore maxPeers if this is a trusted peer
   512  	if sb.peers.Len() >= sb.maxPeers && !p.GetP2PPeer().Info().Networks[p2p.ConnDefault].Trusted {
   513  		return p2p.DiscTooManyPeers
   514  	}
   515  	p.GetP2PPeer().Log().Debug("Klaytn peer connected", "name", p.GetP2PPeer().Name())
   516  
   517  	// Execute the handshake
   518  	var (
   519  		head   = sb.blockchain.CurrentHeader()
   520  		hash   = head.Hash()
   521  		number = head.Number.Uint64()
   522  		td     = sb.blockchain.GetTd(hash, number)
   523  	)
   524  
   525  	err := p.Handshake(sb.networkId, sb.getChainID(), td, hash)
   526  	if err != nil {
   527  		p.GetP2PPeer().Log().Debug("Klaytn peer handshake failed", "err", err)
   528  		fmt.Println(err)
   529  		return err
   530  	}
   531  
   532  	// Register the peer locally
   533  	if err := sb.peers.Register(p); err != nil {
   534  		// if starting node with unlock account, can't register peer until finish unlock
   535  		p.GetP2PPeer().Log().Info("Klaytn peer registration failed", "err", err)
   536  		fmt.Println(err)
   537  		return err
   538  	}
   539  	defer sb.removePeer(p.GetID())
   540  
   541  	sb.handler.RegisterNewPeer(p)
   542  
   543  	p.GetP2PPeer().Log().Info("Added a P2P Peer", "peerID", p.GetP2PPeerID())
   544  
   545  	// main loop. handle incoming messages.
   546  	for {
   547  		if err := sb.handleMsg(p); err != nil {
   548  			p.GetP2PPeer().Log().Debug("Klaytn message handling failed", "err", err)
   549  			return err
   550  		}
   551  	}
   552  }
   553  
// resetBridgeLoop tracks the number of connected bridge peers (via
// addPeerCh/removePeerCh) and, once all peers have dropped, retries
// re-subscribing all bridge contract events every resetBridgeCycle after a
// peer reconnects. Runs until quitSync is closed.
func (sb *SubBridge) resetBridgeLoop() {
	defer sb.pmwg.Done()

	ticker := time.NewTicker(resetBridgeCycle)
	defer ticker.Stop()

	peerCount := 0
	needResetSubscription := false

	for {
		select {
		case <-sb.quitSync:
			return
		case <-sb.addPeerCh:
			peerCount++
		case <-sb.removePeerCh:
			peerCount--
			if peerCount == 0 {
				// Lost the last peer: the event subscriptions and the parent
				// operator nonce can no longer be trusted.
				needResetSubscription = true
				sb.handler.setParentOperatorNonceSynced(false)
			}
		case <-ticker.C:
			// Retry until the reset succeeds while at least one peer is up.
			if needResetSubscription && peerCount > 0 {
				err := sb.bridgeManager.ResetAllSubscribedEvents()
				if err == nil {
					needResetSubscription = false
				}
			}
		}
	}
}
   585  
   586  func (sb *SubBridge) restoreBridgeLoop() {
   587  	defer sb.pmwg.Done()
   588  
   589  	ticker := time.NewTicker(restoreBridgeCycle)
   590  	defer ticker.Stop()
   591  
   592  	for {
   593  		select {
   594  		case <-sb.quitSync:
   595  			return
   596  		case <-ticker.C:
   597  			if err := sb.bridgeManager.RestoreBridges(); err != nil {
   598  				logger.Debug("failed to sb.bridgeManager.RestoreBridges()", "err", err)
   599  				continue
   600  			}
   601  			return
   602  		}
   603  	}
   604  }
   605  
// loop is the main event loop of the subbridge. It dispatches RPC payloads,
// chain head/log events, and bridge value-transfer events to their handlers,
// and returns (ending the goroutine) when any of its subscriptions errors
// out or is unsubscribed during Stop().
func (sb *SubBridge) loop() {
	defer sb.pmwg.Done()

	// Keep waiting for and reacting to the various events
	for {
		select {
		// Relay RPC payloads read from the pipe to the main-bridge peers.
		case sendData := <-sb.rpcSendCh:
			sb.SendRPCData(sendData)
		// Handle ChainHeadEvent
		case ev := <-sb.chainCh:
			if ev.Block != nil {
				if err := sb.eventhandler.HandleChainHeadEvent(ev.Block); err != nil {
					logger.Error("subbridge block event", "err", err)
				}

				// KAS anchoring is driven by new chain heads as well.
				sb.kasAnchor.AnchorPeriodicBlock(ev.Block)
			} else {
				logger.Error("subbridge block event is nil")
			}
		// Handle NewTxsEvent
		// case ev := <-sb.txCh:
		//	if ev.Txs != nil {
		//		if err := sb.eventhandler.HandleTxsEvent(ev.Txs); err != nil {
		//			logger.Error("subbridge tx event", "err", err)
		//		}
		//	} else {
		//		logger.Error("subbridge tx event is nil")
		//	}
		// Handle ChainLogsEvent
		case logs := <-sb.logsCh:
			if err := sb.eventhandler.HandleLogsEvent(logs); err != nil {
				logger.Error("subbridge log event", "err", err)
			}
		// Handle Bridge Event
		case ev := <-sb.chanReqVTev:
			vtRequestEventMeter.Mark(1)
			if err := sb.eventhandler.ProcessRequestEvent(ev); err != nil {
				logger.Error("fail to process request value transfer event ", "err", err)
			}
		case ev := <-sb.chanReqVTencodedEv:
			vtRequestEventMeter.Mark(1)
			if err := sb.eventhandler.ProcessRequestEvent(ev); err != nil {
				logger.Error("fail to process request value transfer event ", "err", err)
			}
		case ev := <-sb.chanHandleVTev:
			vtHandleEventMeter.Mark(1)
			if err := sb.eventhandler.ProcessHandleEvent(ev); err != nil {
				logger.Error("fail to process handle value transfer event ", "err", err)
			}
		// Any subscription error (or Unsubscribe during Stop) terminates the loop.
		case err := <-sb.chainSub.Err():
			if err != nil {
				logger.Error("subbridge block subscription ", "err", err)
			}
			return
		// case err := <-sb.txSub.Err():
		//	if err != nil {
		//		logger.Error("subbridge tx subscription ", "err", err)
		//	}
		//	return
		case err := <-sb.logsSub.Err():
			if err != nil {
				logger.Error("subbridge log subscription ", "err", err)
			}
			return
		case err := <-sb.reqVTevSub.Err():
			if err != nil {
				logger.Error("subbridge token-received subscription ", "err", err)
			}
			return
		case err := <-sb.reqVTencodedEvSub.Err():
			if err != nil {
				logger.Error("subbridge token-received subscription ", "err", err)
			}
			return
		case err := <-sb.handleVTevSub.Err():
			if err != nil {
				logger.Error("subbridge token-transfer subscription ", "err", err)
			}
			return
		}
	}
}
   688  
   689  func (sb *SubBridge) removePeer(id string) {
   690  	sb.removePeerCh <- struct{}{}
   691  
   692  	// Short circuit if the peer was already removed
   693  	peer := sb.peers.Peer(id)
   694  	if peer == nil {
   695  		return
   696  	}
   697  	logger.Debug("Removing Klaytn peer", "peer", id)
   698  
   699  	if err := sb.peers.Unregister(id); err != nil {
   700  		logger.Error("Peer removal failed", "peer", id, "err", err)
   701  	}
   702  	// Hard disconnect at the networking layer
   703  	if peer != nil {
   704  		peer.GetP2PPeer().Disconnect(p2p.DiscUselessPeer)
   705  	}
   706  }
   707  
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (sb *SubBridge) handleMsg(p BridgePeer) error {
	// Below message size checking is done by handle().
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := p.GetRW().ReadMsg()
	if err != nil {
		p.GetP2PPeer().Log().Warn("ProtocolManager failed to read msg", "err", err)
		return err
	}
	if msg.Size > ProtocolMaxMsgSize {
		err := errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
		p.GetP2PPeer().Log().Warn("ProtocolManager over max msg size", "err", err)
		return err
	}
	// Discard any unconsumed payload after the handler runs. Note the defer is
	// registered after the size check, so an oversized message is returned
	// without being discarded (the connection is torn down anyway).
	defer msg.Discard()

	return sb.handler.HandleMainMsg(p, msg)
}
   727  
   728  func (sb *SubBridge) syncer() {
   729  	// Start and ensure cleanup of sync mechanisms
   730  	// pm.fetcher.Start()
   731  	// defer pm.fetcher.Stop()
   732  	// defer pm.downloader.Terminate()
   733  
   734  	// Wait for different events to fire synchronisation operations
   735  	forceSync := time.NewTicker(forceSyncCycle)
   736  	defer forceSync.Stop()
   737  
   738  	for {
   739  		select {
   740  		case peer := <-sb.newPeerCh:
   741  			go sb.synchronise(peer)
   742  
   743  		case <-forceSync.C:
   744  			// Force a sync even if not enough peers are present
   745  			go sb.synchronise(sb.peers.BestPeer())
   746  
   747  		case <-sb.noMorePeers:
   748  			return
   749  		}
   750  	}
   751  }
   752  
// synchronise attempts to sync with the given peer. Currently a no-op stub.
func (sb *SubBridge) synchronise(peer BridgePeer) {
	// @TODO Klaytn ServiceChain Sync
}
   756  
// Stop implements node.Service, terminating all internal goroutines used by the
// Klaytn protocol. Closing quitSync stops syncer, resetBridgeLoop,
// restoreBridgeLoop and the per-peer handlers; the Unsubscribe calls cause
// loop() to return via its subscription-error cases.
func (sb *SubBridge) Stop() error {
	close(sb.quitSync)
	sb.bridgeManager.stopAllRecoveries()

	sb.chainSub.Unsubscribe()
	// sb.txSub.Unsubscribe()
	sb.logsSub.Unsubscribe()
	sb.reqVTevSub.Unsubscribe()
	sb.reqVTencodedEvSub.Unsubscribe()
	sb.handleVTevSub.Unsubscribe()
	sb.eventMux.Stop()
	sb.chainDB.Close()

	// Finally tear down the bridge manager, the bridge tx pool and the
	// dedicated p2p server.
	sb.bridgeManager.Stop()
	sb.bridgeTxPool.Stop()
	sb.bridgeServer.Stop()

	return nil
}