github.com/klaytn/klaytn@v1.10.2/node/sc/subbridge.go (about)

     1  // Modifications Copyright 2019 The klaytn Authors
     2  // Copyright 2014 The go-ethereum Authors
     3  // This file is part of go-ethereum.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from eth/backend.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package sc
    22  
    23  import (
    24  	"context"
    25  	"errors"
    26  	"fmt"
    27  	"io"
    28  	"math/big"
    29  	"net"
    30  	"path"
    31  	"sync"
    32  	"time"
    33  
    34  	"github.com/klaytn/klaytn/accounts"
    35  	"github.com/klaytn/klaytn/accounts/abi/bind"
    36  	"github.com/klaytn/klaytn/api"
    37  	"github.com/klaytn/klaytn/blockchain"
    38  	"github.com/klaytn/klaytn/blockchain/types"
    39  	"github.com/klaytn/klaytn/common"
    40  	"github.com/klaytn/klaytn/crypto"
    41  	"github.com/klaytn/klaytn/event"
    42  	"github.com/klaytn/klaytn/networks/p2p"
    43  	"github.com/klaytn/klaytn/networks/p2p/discover"
    44  	"github.com/klaytn/klaytn/networks/rpc"
    45  	"github.com/klaytn/klaytn/node"
    46  	"github.com/klaytn/klaytn/node/sc/bridgepool"
    47  	"github.com/klaytn/klaytn/node/sc/kas"
    48  	"github.com/klaytn/klaytn/params"
    49  	"github.com/klaytn/klaytn/storage/database"
    50  	"github.com/klaytn/klaytn/work"
    51  )
    52  
const (
	forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available

	// Buffer sizes of the value-transfer event channels consumed by loop().
	// NOTE(review): "VTevan" looks like a typo for "VTevChan"; renaming would
	// touch other files in this package, so it is left as-is here.
	chanReqVTevanSize    = 10000
	chanHandleVTevanSize = 10000

	resetBridgeCycle   = 3 * time.Second // polling period used by resetBridgeLoop to retry event re-subscription
	restoreBridgeCycle = 3 * time.Second // polling period used by restoreBridgeLoop until RestoreBridges succeeds once
)
    62  
// RemoteBackendInterface wraps methods for remote backend.
type RemoteBackendInterface interface {
	bind.ContractBackend
	// TransactionReceiptRpcOutput returns the receipt of txHash in RPC output (JSON map) form.
	TransactionReceiptRpcOutput(ctx context.Context, txHash common.Hash) (map[string]interface{}, error)
	// BalanceAt returns the balance of account at blockNumber (nil blockNumber presumably means latest — confirm with implementation).
	BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error)
}
    69  
// Backend wraps all methods for local and remote backend.
type Backend interface {
	bind.ContractBackend
	// CurrentBlockNumber returns the latest block number of the backing chain.
	CurrentBlockNumber(context.Context) (uint64, error)
	// BalanceAt returns the balance of account at blockNumber (nil blockNumber presumably means latest — confirm with implementation).
	BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error)
}
    76  
// SubBridgeInfo represents a short summary of the ServiceChain sub-protocol
// metadata known about the host peer.
type SubBridgeInfo struct {
	Network uint64              `json:"network"` // Klaytn network ID
	Genesis common.Hash         `json:"genesis"` // SHA3 hash of the host's genesis block
	Config  *params.ChainConfig `json:"config"`  // Chain configuration for the fork rules
	Head    common.Hash         `json:"head"`    // SHA3 hash of the host's best owned block
	ChainID *big.Int            `json:"chainid"` // ChainID
}
    86  
//go:generate mockgen -destination=bridgeTxPool_mock_test.go -package=sc github.com/klaytn/klaytn/node/sc BridgeTxPool

// BridgeTxPool is the interface of the transaction pool that holds
// bridge transactions destined for the parent chain.
type BridgeTxPool interface {
	// GetMaxTxNonce returns the highest nonce among pending txs of from.
	GetMaxTxNonce(from *common.Address) uint64
	// AddLocal queues tx into the pool.
	AddLocal(tx *types.Transaction) error
	// Stats returns the number of pooled transactions.
	Stats() int
	// Pending returns all pending transactions grouped by sender.
	Pending() map[common.Address]types.Transactions
	// Get looks up a pooled transaction by hash; nil if absent.
	Get(hash common.Hash) *types.Transaction
	// RemoveTx drops tx from the pool.
	RemoveTx(tx *types.Transaction) error
	// PendingTxHashesByAddress returns up to limit pending tx hashes of from.
	PendingTxHashesByAddress(from *common.Address, limit int) []common.Hash
	// PendingTxsByAddress returns up to limit pending txs of from.
	PendingTxsByAddress(from *common.Address, limit int) types.Transactions
	// Stop shuts the pool down.
	Stop()
}
    99  
// SubBridge implements the Klaytn consensus node service.
// It runs on a service (child) chain node and bridges value-transfer and
// anchoring traffic between the child chain and the parent (main) chain.
type SubBridge struct {
	config *SCConfig

	// DB interfaces
	chainDB database.DBManager // Block chain database

	eventMux       *event.TypeMux
	accountManager *accounts.Manager

	networkId     uint64
	netRPCService *api.PublicNetAPI

	lock sync.RWMutex // Protects the variadic fields (e.g. gas price and coinbase)

	bridgeServer p2p.Server // outbound-only p2p server connecting to the main bridge
	ctx          *node.ServiceContext
	maxPeers     int

	APIBackend *SubBridgeAPI

	// channels for fetcher, syncer, txsyncLoop
	newPeerCh    chan BridgePeer
	addPeerCh    chan struct{}
	noMorePeers  chan struct{}
	removePeerCh chan struct{}
	quitSync     chan struct{}

	// wait group is used for graceful shutdowns during downloading and processing
	pmwg sync.WaitGroup

	blockchain   *blockchain.BlockChain
	txPool       *blockchain.TxPool
	bridgeTxPool BridgeTxPool

	// chain event
	chainCh  chan blockchain.ChainEvent
	chainSub event.Subscription
	logsCh   chan []*types.Log
	logsSub  event.Subscription

	// If this channel can't be read immediately, it can lock service chain tx pool.
	// Commented out because for now, it doesn't need.
	// txCh         chan blockchain.NewTxsEvent
	// txSub        event.Subscription

	peers        *bridgePeerSet
	handler      *SubBridgeHandler
	eventhandler *ChildChainEventHandler

	// bridgemanager for value exchange
	localBackend  Backend
	remoteBackend Backend
	bridgeManager *BridgeManager

	// value-transfer event plumbing; channels are filled by bridgeManager
	// subscriptions (see SetComponents) and drained by loop().
	chanReqVTev        chan RequestValueTransferEvent
	chanReqVTencodedEv chan RequestValueTransferEncodedEvent
	reqVTevSub         event.Subscription
	reqVTencodedEvSub  event.Subscription
	chanHandleVTev     chan *HandleValueTransferEvent
	handleVTevSub      event.Subscription

	bridgeAccounts *BridgeAccounts

	// bootFail is set by SetComponents on initialization failure; Start refuses to run when true.
	bootFail bool

	// service on/off
	onAnchoringTx bool

	// rpcConn is the pipe used to relay RPC requests/responses through the parent chain peer.
	rpcConn   net.Conn
	rpcSendCh chan []byte

	// KAS Anchor
	kasAnchor *kas.Anchor
}
   175  
// NewSubBridge creates a new SubBridge object, wiring up its database,
// bridge tx pool, bridge accounts, and protocol/event handlers.
// Chain-dependent components (blockchain, txpool) are injected later via
// SetComponents.
func NewSubBridge(ctx *node.ServiceContext, config *SCConfig) (*SubBridge, error) {
	chainDB := CreateDB(ctx, config, "subbridgedata")

	sb := &SubBridge{
		config:         config,
		chainDB:        chainDB,
		peers:          newBridgePeerSet(),
		newPeerCh:      make(chan BridgePeer),
		addPeerCh:      make(chan struct{}),
		removePeerCh:   make(chan struct{}),
		noMorePeers:    make(chan struct{}),
		eventMux:       ctx.EventMux,
		accountManager: ctx.AccountManager,
		networkId:      config.NetworkId,
		ctx:            ctx,
		chainCh:        make(chan blockchain.ChainEvent, chainEventChanSize),
		logsCh:         make(chan []*types.Log, chainLogChanSize),
		// txCh:            make(chan blockchain.NewTxsEvent, transactionChanSize),
		chanReqVTev:        make(chan RequestValueTransferEvent, chanReqVTevanSize),
		chanReqVTencodedEv: make(chan RequestValueTransferEncodedEvent, chanReqVTevanSize),
		chanHandleVTev:     make(chan *HandleValueTransferEvent, chanHandleVTevanSize),
		quitSync:           make(chan struct{}),
		maxPeers:           config.MaxPeer,
		onAnchoringTx:      config.Anchoring,
		bootFail:           false,
		rpcSendCh:          make(chan []byte),
	}
	// TODO-Klaytn change static config to user define config
	bridgetxConfig := bridgepool.BridgeTxPoolConfig{
		ParentChainID: new(big.Int).SetUint64(config.ParentChainID),
		Journal:       path.Join(config.DataDir, "bridge_transactions.rlp"),
		Rejournal:     time.Hour,
		GlobalQueue:   8192,
	}

	logger.Info("Initialising Klaytn-Bridge protocol", "network", config.NetworkId)
	sb.APIBackend = &SubBridgeAPI{sb}

	sb.bridgeTxPool = bridgepool.NewBridgeTxPool(bridgetxConfig)

	var err error
	sb.bridgeAccounts, err = NewBridgeAccounts(sb.accountManager, config.DataDir, chainDB, sb.config.ServiceChainParentOperatorGasLimit, sb.config.ServiceChainChildOperatorGasLimit)
	if err != nil {
		return nil, err
	}
	sb.handler, err = NewSubBridgeHandler(sb)
	if err != nil {
		return nil, err
	}
	sb.eventhandler, err = NewChildChainEventHandler(sb, sb.handler)
	if err != nil {
		return nil, err
	}
	// The parent operator signs parent-chain txs, so it uses the parent chain ID.
	sb.bridgeAccounts.pAccount.SetChainID(new(big.Int).SetUint64(config.ParentChainID))

	return sb, nil
}
   235  
   236  func (sb *SubBridge) SetRPCConn(conn net.Conn) {
   237  	sb.rpcConn = conn
   238  
   239  	go func() {
   240  		for {
   241  			data := make([]byte, rpcBufferSize)
   242  			rlen, err := sb.rpcConn.Read(data)
   243  			if err != nil {
   244  				if err == io.EOF {
   245  					logger.Trace("EOF from the rpc pipe")
   246  					time.Sleep(100 * time.Millisecond)
   247  					continue
   248  				} else {
   249  					// If no one closes the pipe, this situation should not happen.
   250  					logger.Error("failed to read from the rpc pipe", "err", err, "rlen", rlen)
   251  					return
   252  				}
   253  			}
   254  			sb.rpcSendCh <- data[:rlen]
   255  		}
   256  	}()
   257  }
   258  
   259  func (sb *SubBridge) SendRPCData(data []byte) error {
   260  	peers := sb.BridgePeerSet().peers
   261  	logger.Trace("send rpc message from the subbridge", "len", len(data), "peers", len(peers))
   262  	for _, peer := range peers {
   263  		err := peer.SendRequestRPC(data)
   264  		if err != nil {
   265  			logger.Error("SendRPCData Error", "err", err)
   266  		}
   267  		return err
   268  	}
   269  	logger.Trace("send rpc message from the subbridge, done")
   270  
   271  	return nil
   272  }
   273  
// implement PeerSetManager

// BridgePeerSet returns the set of currently connected bridge peers.
func (sb *SubBridge) BridgePeerSet() *bridgePeerSet {
	return sb.peers
}

// GetBridgeTxPool returns the bridge transaction pool of this subbridge.
func (sb *SubBridge) GetBridgeTxPool() BridgeTxPool {
	return sb.bridgeTxPool
}

// GetAnchoringTx reports whether the anchoring-tx service is enabled.
func (sb *SubBridge) GetAnchoringTx() bool {
	return sb.onAnchoringTx
}
   286  
   287  func (sb *SubBridge) SetAnchoringTx(flag bool) bool {
   288  	if sb.onAnchoringTx != flag && flag {
   289  		sb.handler.txCountStartingBlockNumber = 0
   290  	}
   291  	sb.onAnchoringTx = flag
   292  	return sb.GetAnchoringTx()
   293  }
   294  
   295  // APIs returns the collection of RPC services the ethereum package offers.
   296  // NOTE, some of these services probably need to be moved to somewhere else.
   297  func (sb *SubBridge) APIs() []rpc.API {
   298  	// Append all the local APIs and return
   299  	return []rpc.API{
   300  		{
   301  			Namespace: "subbridge",
   302  			Version:   "1.0",
   303  			Service:   sb.APIBackend,
   304  			Public:    true,
   305  		},
   306  		{
   307  			Namespace: "subbridge",
   308  			Version:   "1.0",
   309  			Service:   sb.netRPCService,
   310  			Public:    true,
   311  		},
   312  	}
   313  }
   314  
// AccountManager returns the account manager shared with the host node.
func (sb *SubBridge) AccountManager() *accounts.Manager { return sb.accountManager }

// EventMux returns the event multiplexer shared with the host node.
func (sb *SubBridge) EventMux() *event.TypeMux { return sb.eventMux }

// ChainDB returns the subbridge's block chain database.
func (sb *SubBridge) ChainDB() database.DBManager { return sb.chainDB }

// IsListening always reports true for the subbridge service.
func (sb *SubBridge) IsListening() bool { return true } // Always listening

// ProtocolVersion returns the first (preferred) supported SC protocol version.
func (sb *SubBridge) ProtocolVersion() int { return int(sb.SCProtocol().Versions[0]) }

// NetVersion returns the configured Klaytn network ID.
func (sb *SubBridge) NetVersion() uint64 { return sb.networkId }
   321  
// Components returns nil: the subbridge provides no components of its own;
// it only consumes those injected via SetComponents.
func (sb *SubBridge) Components() []interface{} {
	return nil
}
   325  
// SetComponents wires the shared node components (blockchain, txpool, miner)
// into the subbridge and boots every bridge subsystem: the KAS anchor, the
// local/remote backends, the bridge manager and its value-transfer event
// subscriptions, and the background loops. On any initialization failure it
// sets sb.bootFail so that Start() refuses to run.
func (sb *SubBridge) SetComponents(components []interface{}) {
	for _, component := range components {
		switch v := component.(type) {
		case *blockchain.BlockChain:
			sb.blockchain = v

			kasConfig := &kas.KASConfig{
				Url:            sb.config.KASAnchorUrl,
				XChainId:       sb.config.KASXChainId,
				User:           sb.config.KASAccessKey,
				Pwd:            sb.config.KASSecretKey,
				Operator:       common.HexToAddress(sb.config.KASAnchorOperator),
				Anchor:         sb.config.KASAnchor,
				AnchorPeriod:   sb.config.KASAnchorPeriod,
				RequestTimeout: sb.config.KASAnchorRequestTimeout,
			}
			sb.kasAnchor = kas.NewKASAnchor(kasConfig, sb.chainDB, v)

			// event from core-service
			sb.chainSub = sb.blockchain.SubscribeChainEvent(sb.chainCh)
			sb.logsSub = sb.blockchain.SubscribeLogsEvent(sb.logsCh)
			// The child operator signs child-chain txs, so it uses the child chain ID.
			sb.bridgeAccounts.cAccount.SetChainID(v.Config().ChainID)
		case *blockchain.TxPool:
			sb.txPool = v
			// event from core-service
			// sb.txSub = sb.txPool.SubscribeNewTxsEvent(sb.txCh)
		// TODO-Klaytn if need pending block, should use miner
		case *work.Miner:
		}
	}

	var err error
	if sb.config.EnabledSubBridge {
		sb.remoteBackend, err = NewRemoteBackend(sb)
		if err != nil {
			logger.Error("fail to initialize RemoteBackend", "err", err)
			sb.bootFail = true
			return
		}
	}
	sb.localBackend, err = NewLocalBackend(sb)
	if err != nil {
		logger.Error("fail to initialize LocalBackend", "err", err)
		sb.bootFail = true
		return
	}

	sb.bridgeManager, err = NewBridgeManager(sb)
	if err != nil {
		logger.Error("fail to initialize BridgeManager", "err", err)
		sb.bootFail = true
		return
	}
	// Value-transfer events flow from the bridge manager into the channels
	// drained by loop().
	sb.reqVTevSub = sb.bridgeManager.SubscribeReqVTev(sb.chanReqVTev)
	sb.reqVTencodedEvSub = sb.bridgeManager.SubscribeReqVTencodedEv(sb.chanReqVTencodedEv)
	sb.handleVTevSub = sb.bridgeManager.SubscribeHandleVTev(sb.chanHandleVTev)

	sb.pmwg.Add(1)
	go sb.restoreBridgeLoop()

	sb.pmwg.Add(1)
	go sb.resetBridgeLoop()

	// Seed the child operator nonce from the txpool's pending view.
	sb.bridgeAccounts.cAccount.SetNonce(sb.txPool.GetPendingNonce(sb.bridgeAccounts.cAccount.address))

	sb.pmwg.Add(1)
	go sb.loop()
}
   394  
// Protocols implements node.Service, returning all the currently configured
// network protocols to start. The subbridge registers its protocols on its
// own private p2p server (see Start), so nothing is exposed here.
func (sb *SubBridge) Protocols() []p2p.Protocol {
	return []p2p.Protocol{}
}
   400  
// SCProtocol returns the ServiceChain protocol descriptor (name, supported
// versions and per-version message-code lengths).
func (sb *SubBridge) SCProtocol() SCProtocol {
	return SCProtocol{
		Name:     SCProtocolName,
		Versions: SCProtocolVersion,
		Lengths:  SCProtocolLength,
	}
}
   408  
   409  // NodeInfo retrieves some protocol metadata about the running host node.
   410  func (sb *SubBridge) NodeInfo() *SubBridgeInfo {
   411  	currentBlock := sb.blockchain.CurrentBlock()
   412  	return &SubBridgeInfo{
   413  		Network: sb.networkId,
   414  		Genesis: sb.blockchain.Genesis().Hash(),
   415  		Config:  sb.blockchain.Config(),
   416  		Head:    currentBlock.Hash(),
   417  		ChainID: sb.blockchain.Config().ChainID,
   418  	}
   419  }
   420  
// getChainID returns the current chain id of the child (service) chain.
func (sb *SubBridge) getChainID() *big.Int {
	return sb.blockchain.Config().ChainID
}
   425  
// Start implements node.Service, starting all internal goroutines needed by the
// Klaytn protocol implementation. It spins up a dedicated outbound-only p2p
// server (no listening, no discovery) that dials the configured main bridges,
// registers the SC sub-protocols on it, and launches the syncer loop.
// The srvr argument (the host node's server) is intentionally unused.
func (sb *SubBridge) Start(srvr p2p.Server) error {
	// SetComponents records boot failures instead of returning them; honor that here.
	if sb.bootFail {
		return errors.New("subBridge node fail to start")
	}

	serverConfig := p2p.Config{}
	serverConfig.PrivateKey = sb.ctx.NodeKey()
	serverConfig.Name = sb.ctx.NodeType().String()
	serverConfig.Logger = logger
	serverConfig.NoListen = true
	serverConfig.MaxPhysicalConnections = sb.maxPeers
	serverConfig.NoDiscovery = true
	serverConfig.EnableMultiChannelServer = false

	// connect to mainbridge as outbound
	serverConfig.StaticNodes = sb.config.MainBridges()

	p2pServer := p2p.NewServer(serverConfig)

	sb.bridgeServer = p2pServer

	scprotocols := make([]p2p.Protocol, 0, len(sb.SCProtocol().Versions))
	for i, version := range sb.SCProtocol().Versions {
		// Compatible; initialise the sub-protocol
		version := version // capture per-iteration copy for the Run closure below
		scprotocols = append(scprotocols, p2p.Protocol{
			Name:    sb.SCProtocol().Name,
			Version: version,
			Length:  sb.SCProtocol().Lengths[i],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				peer := sb.newPeer(int(version), p, rw)
				pubKey, _ := p.ID().Pubkey()
				addr := crypto.PubkeyToAddress(*pubKey)
				peer.SetAddr(addr)
				select {
				case sb.newPeerCh <- peer:
					return sb.handle(peer)
				case <-sb.quitSync:
					return p2p.DiscQuitting
				}
			},
			NodeInfo: func() interface{} {
				return sb.NodeInfo()
			},
			PeerInfo: func(id discover.NodeID) interface{} {
				if p := sb.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
					return p.Info()
				}
				return nil
			},
		})
	}
	sb.bridgeServer.AddProtocols(scprotocols)

	if err := p2pServer.Start(); err != nil {
		return errors.New("fail to bridgeserver start")
	}

	// Start the RPC service
	sb.netRPCService = api.NewPublicNetAPI(sb.bridgeServer, sb.NetVersion())

	// Figure out a max peers count based on the server limits
	//sb.maxPeers = sb.bridgeServer.MaxPhysicalConnections()
	//validator := func(header *types.Header) error {
	//	return nil
	//}
	//heighter := func() uint64 {
	//	return sb.blockchain.CurrentBlock().NumberU64()
	//}
	//inserter := func(blocks types.Blocks) (int, error) {
	//	return 0, nil
	//}
	//sb.fetcher = fetcher.New(sb.GetBlockByHash, validator, sb.BroadcastBlock, heighter, inserter, sb.removePeer)

	go sb.syncer()

	return nil
}
   506  
// newPeer wraps a raw p2p peer/stream into a BridgePeer with a metered writer.
func (sb *SubBridge) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) BridgePeer {
	return newBridgePeer(pv, p, newMeteredMsgWriter(rw))
}
   510  
   511  func (sb *SubBridge) handle(p BridgePeer) error {
   512  	// Ignore maxPeers if this is a trusted peer
   513  	if sb.peers.Len() >= sb.maxPeers && !p.GetP2PPeer().Info().Networks[p2p.ConnDefault].Trusted {
   514  		return p2p.DiscTooManyPeers
   515  	}
   516  	p.GetP2PPeer().Log().Debug("Klaytn peer connected", "name", p.GetP2PPeer().Name())
   517  
   518  	// Execute the handshake
   519  	var (
   520  		head   = sb.blockchain.CurrentHeader()
   521  		hash   = head.Hash()
   522  		number = head.Number.Uint64()
   523  		td     = sb.blockchain.GetTd(hash, number)
   524  	)
   525  
   526  	err := p.Handshake(sb.networkId, sb.getChainID(), td, hash)
   527  	if err != nil {
   528  		p.GetP2PPeer().Log().Debug("Klaytn peer handshake failed", "err", err)
   529  		fmt.Println(err)
   530  		return err
   531  	}
   532  
   533  	// Register the peer locally
   534  	if err := sb.peers.Register(p); err != nil {
   535  		// if starting node with unlock account, can't register peer until finish unlock
   536  		p.GetP2PPeer().Log().Info("Klaytn peer registration failed", "err", err)
   537  		fmt.Println(err)
   538  		return err
   539  	}
   540  	defer sb.removePeer(p.GetID())
   541  
   542  	sb.handler.RegisterNewPeer(p)
   543  
   544  	p.GetP2PPeer().Log().Info("Added a P2P Peer", "peerID", p.GetP2PPeerID())
   545  
   546  	// main loop. handle incoming messages.
   547  	for {
   548  		if err := sb.handleMsg(p); err != nil {
   549  			p.GetP2PPeer().Log().Debug("Klaytn message handling failed", "err", err)
   550  			return err
   551  		}
   552  	}
   553  }
   554  
// resetBridgeLoop tracks the connected-peer count via addPeerCh/removePeerCh.
// When the last peer disappears it marks the event subscriptions as stale (and
// the parent operator nonce as unsynced); once at least one peer is back, it
// retries ResetAllSubscribedEvents every resetBridgeCycle until it succeeds.
// The loop exits when quitSync is closed.
func (sb *SubBridge) resetBridgeLoop() {
	defer sb.pmwg.Done()

	ticker := time.NewTicker(resetBridgeCycle)
	defer ticker.Stop()

	peerCount := 0
	needResetSubscription := false

	for {
		select {
		case <-sb.quitSync:
			return
		case <-sb.addPeerCh:
			peerCount++
		case <-sb.removePeerCh:
			peerCount--
			if peerCount == 0 {
				// Lost the last parent-chain connection; subscriptions must be rebuilt.
				needResetSubscription = true
				sb.handler.setParentOperatorNonceSynced(false)
			}
		case <-ticker.C:
			if needResetSubscription && peerCount > 0 {
				err := sb.bridgeManager.ResetAllSubscribedEvents()
				if err == nil {
					needResetSubscription = false
				}
			}
		}
	}
}
   586  
// restoreBridgeLoop retries RestoreBridges every restoreBridgeCycle until it
// succeeds once, then exits. It also exits when quitSync is closed.
func (sb *SubBridge) restoreBridgeLoop() {
	defer sb.pmwg.Done()

	ticker := time.NewTicker(restoreBridgeCycle)
	defer ticker.Stop()

	for {
		select {
		case <-sb.quitSync:
			return
		case <-ticker.C:
			if err := sb.bridgeManager.RestoreBridges(); err != nil {
				logger.Debug("failed to sb.bridgeManager.RestoreBridges()", "err", err)
				continue
			}
			// Restoration succeeded; this loop's job is done.
			return
		}
	}
}
   606  
// loop is the subbridge's main event loop. It dispatches chain-head and log
// events to the child-chain event handler, relays RPC payloads to peers, and
// processes value-transfer request/handle events from the bridge manager.
// The loop terminates when any of its subscriptions errors out or is closed.
func (sb *SubBridge) loop() {
	defer sb.pmwg.Done()

	// Keep waiting for and reacting to the various events
	for {
		select {
		case sendData := <-sb.rpcSendCh:
			sb.SendRPCData(sendData)
		// Handle ChainHeadEvent
		case ev := <-sb.chainCh:
			if ev.Block != nil {
				if err := sb.eventhandler.HandleChainHeadEvent(ev.Block); err != nil {
					logger.Error("subbridge block event", "err", err)
				}

				sb.kasAnchor.AnchorPeriodicBlock(ev.Block)
			} else {
				logger.Error("subbridge block event is nil")
			}
		// Handle NewTxsEvent
		// case ev := <-sb.txCh:
		//	if ev.Txs != nil {
		//		if err := sb.eventhandler.HandleTxsEvent(ev.Txs); err != nil {
		//			logger.Error("subbridge tx event", "err", err)
		//		}
		//	} else {
		//		logger.Error("subbridge tx event is nil")
		//	}
		// Handle ChainLogsEvent
		case logs := <-sb.logsCh:
			if err := sb.eventhandler.HandleLogsEvent(logs); err != nil {
				logger.Error("subbridge log event", "err", err)
			}
		// Handle Bridge Event
		case ev := <-sb.chanReqVTev:
			vtRequestEventMeter.Mark(1)
			if err := sb.eventhandler.ProcessRequestEvent(ev); err != nil {
				logger.Error("fail to process request value transfer event ", "err", err)
			}
		case ev := <-sb.chanReqVTencodedEv:
			vtRequestEventMeter.Mark(1)
			if err := sb.eventhandler.ProcessRequestEvent(ev); err != nil {
				logger.Error("fail to process request value transfer event ", "err", err)
			}
		case ev := <-sb.chanHandleVTev:
			vtHandleEventMeter.Mark(1)
			if err := sb.eventhandler.ProcessHandleEvent(ev); err != nil {
				logger.Error("fail to process handle value transfer event ", "err", err)
			}
		// Any subscription error (including Unsubscribe during Stop) ends the loop.
		case err := <-sb.chainSub.Err():
			if err != nil {
				logger.Error("subbridge block subscription ", "err", err)
			}
			return
		// case err := <-sb.txSub.Err():
		//	if err != nil {
		//		logger.Error("subbridge tx subscription ", "err", err)
		//	}
		//	return
		case err := <-sb.logsSub.Err():
			if err != nil {
				logger.Error("subbridge log subscription ", "err", err)
			}
			return
		case err := <-sb.reqVTevSub.Err():
			if err != nil {
				logger.Error("subbridge token-received subscription ", "err", err)
			}
			return
		case err := <-sb.reqVTencodedEvSub.Err():
			if err != nil {
				logger.Error("subbridge token-received subscription ", "err", err)
			}
			return
		case err := <-sb.handleVTevSub.Err():
			if err != nil {
				logger.Error("subbridge token-transfer subscription ", "err", err)
			}
			return
		}
	}
}
   689  
   690  func (sb *SubBridge) removePeer(id string) {
   691  	sb.removePeerCh <- struct{}{}
   692  
   693  	// Short circuit if the peer was already removed
   694  	peer := sb.peers.Peer(id)
   695  	if peer == nil {
   696  		return
   697  	}
   698  	logger.Debug("Removing Klaytn peer", "peer", id)
   699  
   700  	if err := sb.peers.Unregister(id); err != nil {
   701  		logger.Error("Peer removal failed", "peer", id, "err", err)
   702  	}
   703  	// Hard disconnect at the networking layer
   704  	if peer != nil {
   705  		peer.GetP2PPeer().Disconnect(p2p.DiscUselessPeer)
   706  	}
   707  }
   708  
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (sb *SubBridge) handleMsg(p BridgePeer) error {
	// Below message size checking is done by handle().
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := p.GetRW().ReadMsg()
	if err != nil {
		p.GetP2PPeer().Log().Warn("ProtocolManager failed to read msg", "err", err)
		return err
	}
	// Reject oversized messages before dispatching them.
	if msg.Size > ProtocolMaxMsgSize {
		err := errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
		p.GetP2PPeer().Log().Warn("ProtocolManager over max msg size", "err", err)
		return err
	}
	// Ensure the payload is drained even if the handler does not consume it fully.
	defer msg.Discard()

	return sb.handler.HandleMainMsg(p, msg)
}
   728  
// syncer triggers synchronise() whenever a new peer connects, and also forces
// one every forceSyncCycle regardless of peer events. It exits when
// noMorePeers is signalled.
func (sb *SubBridge) syncer() {
	// Start and ensure cleanup of sync mechanisms
	// pm.fetcher.Start()
	// defer pm.fetcher.Stop()
	// defer pm.downloader.Terminate()

	// Wait for different events to fire synchronisation operations
	forceSync := time.NewTicker(forceSyncCycle)
	defer forceSync.Stop()

	for {
		select {
		case peer := <-sb.newPeerCh:
			go sb.synchronise(peer)

		case <-forceSync.C:
			// Force a sync even if not enough peers are present
			go sb.synchronise(sb.peers.BestPeer())

		case <-sb.noMorePeers:
			return
		}
	}
}
   753  
// synchronise is a placeholder for service-chain sync with the given peer;
// currently a no-op.
func (sb *SubBridge) synchronise(peer BridgePeer) {
	// @TODO Klaytn ServiceChain Sync
}
   757  
// Stop implements node.Service, terminating all internal goroutines used by the
// Klaytn protocol. Closing quitSync stops the syncer/reset/restore loops;
// unsubscribing the event subscriptions makes loop() return via its Err()
// cases; then the remaining subsystems are shut down.
func (sb *SubBridge) Stop() error {
	close(sb.quitSync)
	sb.bridgeManager.stopAllRecoveries()

	sb.chainSub.Unsubscribe()
	// sb.txSub.Unsubscribe()
	sb.logsSub.Unsubscribe()
	sb.reqVTevSub.Unsubscribe()
	sb.reqVTencodedEvSub.Unsubscribe()
	sb.handleVTevSub.Unsubscribe()
	sb.eventMux.Stop()
	sb.chainDB.Close()

	sb.bridgeManager.Stop()
	sb.bridgeTxPool.Stop()
	sb.bridgeServer.Stop()

	return nil
}