github.com/bloxroute-labs/bor@v0.1.4/les/server.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"crypto/ecdsa"
	"sync"
	"time"

	"github.com/maticnetwork/bor/accounts/abi/bind"
	"github.com/maticnetwork/bor/common"
	"github.com/maticnetwork/bor/common/mclock"
	"github.com/maticnetwork/bor/core"
	"github.com/maticnetwork/bor/core/rawdb"
	"github.com/maticnetwork/bor/core/types"
	"github.com/maticnetwork/bor/eth"
	"github.com/maticnetwork/bor/les/flowcontrol"
	"github.com/maticnetwork/bor/light"
	"github.com/maticnetwork/bor/log"
	"github.com/maticnetwork/bor/p2p"
	"github.com/maticnetwork/bor/p2p/discv5"
	"github.com/maticnetwork/bor/params"
	"github.com/maticnetwork/bor/rpc"
)

const bufLimitRatio = 6000 // fixed bufLimit/MRR ratio

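// LesServer implements the server side of the Light Ethereum Subprotocol (LES).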
type LesServer struct {
	lesCommons

	archiveMode bool // Flag whether the ethereum node runs in archive mode.

	fcManager    *flowcontrol.ClientManager // nil if our node is client only
	costTracker  *costTracker
	testCost     uint64
	defParams    flowcontrol.ServerParams
	lesTopics    []discv5.Topic
	privateKey   *ecdsa.PrivateKey
	quitSync     chan struct{}
	onlyAnnounce bool

	thcNormal, thcBlockProcessing int // serving thread count for normal operation and block processing mode

	maxPeers                   int
	minCapacity, freeClientCap uint64
	freeClientPool             *freeClientPool
}

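// NewLesServer creates a LES server backed by the given full Ethereum service,
// setting up the cost tracker, flow control manager, checkpoint oracle registrar
// and the serving protocol manager.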
func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
	lesTopics := make([]discv5.Topic, len(AdvertiseProtocolVersions))
	for i, pv := range AdvertiseProtocolVersions {
		lesTopics[i] = lesTopic(e.BlockChain().Genesis().Hash(), pv)
	}
	quitSync := make(chan struct{})
	srv := &LesServer{
		lesCommons: lesCommons{
			config:           config,
			iConfig:          light.DefaultServerIndexerConfig,
			chainDb:          e.ChainDb(),
			chtIndexer:       light.NewChtIndexer(e.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations),
			bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
		},
		archiveMode:  e.ArchiveMode(),
		quitSync:     quitSync,
		lesTopics:    lesTopics,
		onlyAnnounce: config.UltraLightOnlyAnnounce,
	}
	srv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config)

	logger := log.New()
	srv.thcNormal = config.LightServ * 4 / 100
	if srv.thcNormal < 4 {
		srv.thcNormal = 4
	}
	srv.thcBlockProcessing = config.LightServ/100 + 1
	srv.fcManager = flowcontrol.NewClientManager(nil, &mclock.System{})

	checkpoint := srv.latestLocalCheckpoint()
	if !checkpoint.Empty() {
		logger.Info("Loaded latest checkpoint", "section", checkpoint.SectionIndex, "head", checkpoint.SectionHead,
			"chtroot", checkpoint.CHTRoot, "bloomroot", checkpoint.BloomRoot)
	}

	srv.chtIndexer.Start(e.BlockChain())

	oracle := config.CheckpointOracle
	if oracle == nil {
		oracle = params.CheckpointOracles[e.BlockChain().Genesis().Hash()]
	}
	registrar := newCheckpointOracle(oracle, srv.getLocalCheckpoint)
	// TODO(rjl493456442) Checkpoint is useless for les server, separate handler for client and server.
	pm, err := NewProtocolManager(e.BlockChain().Config(), nil, light.DefaultServerIndexerConfig, config.UltraLightServers, config.UltraLightFraction, false, config.NetworkId, e.EventMux(), newPeerSet(), e.BlockChain(), e.TxPool(), e.ChainDb(), nil, nil, registrar, quitSync, new(sync.WaitGroup), e.Synced)
	if err != nil {
		return nil, err
	}
	srv.protocolManager = pm
	pm.servingQueue = newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100)
	pm.server = srv

	return srv, nil
}

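// APIs returns the RPC APIs exposed by the LES server, currently only the private "les" namespace.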
func (s *LesServer) APIs() []rpc.API {
	return []rpc.API{
		{
			Namespace: "les",
			Version:   "1.0",
			Service:   NewPrivateLightAPI(&s.lesCommons, s.protocolManager.reg),
			Public:    false,
		},
	}
}

// startEventLoop starts an event handler loop that updates the recharge curve of
// the client manager and adjusts the client pool's size according to the total
// capacity updates coming from the client manager.
func (s *LesServer) startEventLoop() {
	s.protocolManager.wg.Add(1)

	var (
		processing, procLast bool
		procStarted          time.Time
	)
	blockProcFeed := make(chan bool, 100)
	s.protocolManager.blockchain.(*core.BlockChain).SubscribeBlockProcessingEvent(blockProcFeed)
	totalRechargeCh := make(chan uint64, 100)
	totalRecharge := s.costTracker.subscribeTotalRecharge(totalRechargeCh)
	totalCapacityCh := make(chan uint64, 100)
	updateRecharge := func() {
		if processing {
			if !procLast {
				procStarted = time.Now()
			}
			s.protocolManager.servingQueue.setThreads(s.thcBlockProcessing)
			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge, totalRecharge}})
		} else {
			if procLast {
				blockProcessingTimer.UpdateSince(procStarted)
			}
			s.protocolManager.servingQueue.setThreads(s.thcNormal)
			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge / 16, totalRecharge / 2}, {totalRecharge / 2, totalRecharge / 2}, {totalRecharge, totalRecharge}})
		}
		procLast = processing
	}
	updateRecharge()
	totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh)
	s.freeClientPool.setLimits(s.maxPeers, totalCapacity)

	var maxFreePeers uint64
	go func() {
		for {
			select {
			case processing = <-blockProcFeed:
				updateRecharge()
			case totalRecharge = <-totalRechargeCh:
				updateRecharge()
			case totalCapacity = <-totalCapacityCh:
				totalCapacityGauge.Update(int64(totalCapacity))
				newFreePeers := totalCapacity / s.freeClientCap
				if newFreePeers < maxFreePeers && newFreePeers < uint64(s.maxPeers) {
					log.Warn("Reduced total capacity", "maxFreePeers", newFreePeers)
				}
				maxFreePeers = newFreePeers
				s.freeClientPool.setLimits(s.maxPeers, totalCapacity)
			case <-s.protocolManager.quitSync:
				s.protocolManager.wg.Done()
				return
			}
		}
	}()
}

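// Protocols returns the p2p protocols advertised by the LES server.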
func (s *LesServer) Protocols() []p2p.Protocol {
	return s.makeProtocols(ServerProtocolVersions)
}

// Start starts the LES server.
func (s *LesServer) Start(srvr *p2p.Server) {
	s.maxPeers = s.config.LightPeers
	totalRecharge := s.costTracker.totalRecharge()
	if s.maxPeers > 0 {
		s.freeClientCap = s.minCapacity // totalRecharge / uint64(s.maxPeers)
		if s.freeClientCap < s.minCapacity {
			s.freeClientCap = s.minCapacity
		}
		if s.freeClientCap > 0 {
			s.defParams = flowcontrol.ServerParams{
				BufLimit:    s.freeClientCap * bufLimitRatio,
				MinRecharge: s.freeClientCap,
			}
		}
	}

	maxCapacity := s.freeClientCap * uint64(s.maxPeers)
	if totalRecharge > maxCapacity {
		maxCapacity = totalRecharge
	}
	s.fcManager.SetCapacityLimits(s.freeClientCap, maxCapacity, s.freeClientCap*2)
	s.freeClientPool = newFreeClientPool(s.chainDb, s.freeClientCap, 10000, mclock.System{}, func(id string) { go s.protocolManager.removePeer(id) })
	s.protocolManager.peers.notify(s.freeClientPool)

	s.startEventLoop()
	s.protocolManager.Start(s.config.LightPeers)
	if srvr.DiscV5 != nil {
		for _, topic := range s.lesTopics {
			topic := topic
			go func() {
				logger := log.New("topic", topic)
				logger.Info("Starting topic registration")
				defer logger.Info("Terminated topic registration")

				srvr.DiscV5.RegisterTopic(topic, s.quitSync)
			}()
		}
	}
	s.privateKey = srvr.PrivateKey
	s.protocolManager.blockLoop()
}

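// SetBloomBitsIndexer registers the bloom trie indexer as a child of the eth
// bloom bits indexer so that both are driven by the same section events.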
func (s *LesServer) SetBloomBitsIndexer(bloomIndexer *core.ChainIndexer) {
	bloomIndexer.AddChildIndexer(s.bloomTrieIndexer)
}

// SetContractBackend sets the contract backend used by the checkpoint oracle and starts watching the checkpoint contract if it is not yet watched.
func (s *LesServer) SetContractBackend(backend bind.ContractBackend) {
	if s.protocolManager.reg != nil {
		s.protocolManager.reg.start(backend)
	}
}

// Stop stops the LES service.
func (s *LesServer) Stop() {
	s.fcManager.Stop()
	s.chtIndexer.Close()
	// bloom trie indexer is closed by parent bloombits indexer
	go func() {
		<-s.protocolManager.noMorePeers
	}()
	s.freeClientPool.stop()
	s.costTracker.stop()
	s.protocolManager.Stop()
}

// TODO(rjl493456442) separate client and server implementation.
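// blockLoop broadcasts new chain head announcements to all connected light
// client peers, signing them for peers that requested signed announcements.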
func (pm *ProtocolManager) blockLoop() {
	pm.wg.Add(1)
	headCh := make(chan core.ChainHeadEvent, 10)
	headSub := pm.blockchain.SubscribeChainHeadEvent(headCh)
	go func() {
		var lastHead *types.Header
		lastBroadcastTd := common.Big0
		for {
			select {
			case ev := <-headCh:
				peers := pm.peers.AllPeers()
				if len(peers) > 0 {
					header := ev.Block.Header()
					hash := header.Hash()
					number := header.Number.Uint64()
					td := rawdb.ReadTd(pm.chainDb, hash, number)
					if td != nil && td.Cmp(lastBroadcastTd) > 0 {
						var reorg uint64
						if lastHead != nil {
							reorg = lastHead.Number.Uint64() - rawdb.FindCommonAncestor(pm.chainDb, header, lastHead).Number.Uint64()
						}
						lastHead = header
						lastBroadcastTd = td

						log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg)

						announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}
						var (
							signed         bool
							signedAnnounce announceData
						)

						for _, p := range peers {
							p := p
							switch p.announceType {
							case announceTypeSimple:
								p.queueSend(func() { p.SendAnnounce(announce) })
							case announceTypeSigned:
								if !signed {
									signedAnnounce = announce
									signedAnnounce.sign(pm.server.privateKey)
									signed = true
								}
								p.queueSend(func() { p.SendAnnounce(signedAnnounce) })
							}
						}
					}
				}
			case <-pm.quitSync:
				headSub.Unsubscribe()
				pm.wg.Done()
				return
			}
		}
	}()
}