github.com/ethereum-optimism/optimism/l2geth@v0.0.0-20230612200230-50b04ade19e3/les/server.go (about)

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package les
    18  
    19  import (
    20  	"crypto/ecdsa"
    21  	"time"
    22  
    23  	"github.com/ethereum-optimism/optimism/l2geth/accounts/abi/bind"
    24  	"github.com/ethereum-optimism/optimism/l2geth/common/mclock"
    25  	"github.com/ethereum-optimism/optimism/l2geth/core"
    26  	"github.com/ethereum-optimism/optimism/l2geth/eth"
    27  	"github.com/ethereum-optimism/optimism/l2geth/les/checkpointoracle"
    28  	"github.com/ethereum-optimism/optimism/l2geth/les/flowcontrol"
    29  	"github.com/ethereum-optimism/optimism/l2geth/light"
    30  	"github.com/ethereum-optimism/optimism/l2geth/log"
    31  	"github.com/ethereum-optimism/optimism/l2geth/p2p"
    32  	"github.com/ethereum-optimism/optimism/l2geth/p2p/discv5"
    33  	"github.com/ethereum-optimism/optimism/l2geth/p2p/enode"
    34  	"github.com/ethereum-optimism/optimism/l2geth/p2p/enr"
    35  	"github.com/ethereum-optimism/optimism/l2geth/params"
    36  	"github.com/ethereum-optimism/optimism/l2geth/rpc"
    37  )
    38  
// LesServer implements the server side of the Light Ethereum Subprotocol,
// serving chain data, proofs and transaction relay to light clients while
// enforcing flow-control limits on how much each client may request.
type LesServer struct {
	lesCommons

	archiveMode bool // Flag whether the ethereum node runs in archive mode.
	handler     *serverHandler
	lesTopics   []discv5.Topic    // discv5 topics advertised, one per protocol version in AdvertiseProtocolVersions
	privateKey  *ecdsa.PrivateKey // node key, copied from the p2p server in Start

	// Flow control and capacity management
	fcManager    *flowcontrol.ClientManager
	costTracker  *costTracker
	defParams    flowcontrol.ServerParams // default flow-control params handed to newly connected clients
	servingQueue *servingQueue
	clientPool   *clientPool

	// minCapacity: smallest capacity a client can be assigned (from the cost tracker);
	// freeCapacity: capacity granted to free (non-paying) clients;
	// maxCapacity: upper bound used when configuring the flow-control manager.
	minCapacity, maxCapacity, freeCapacity uint64
	threadsIdle                            int // Request serving threads count when system is idle.
	threadsBusy                            int // Request serving threads count when system is busy(block insertion).
}
    58  
// NewLesServer creates a LES server on top of a full Ethereum node.
// It wires up the serving queue, cost tracker, flow-control manager and
// client pool, configures the checkpoint oracle, and starts the CHT
// indexer. The returned server still needs Start to begin serving peers.
func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
	// Collect les protocol version information supported by local node.
	lesTopics := make([]discv5.Topic, len(AdvertiseProtocolVersions))
	for i, pv := range AdvertiseProtocolVersions {
		lesTopics[i] = lesTopic(e.BlockChain().Genesis().Hash(), pv)
	}
	// Calculate the number of threads used to service the light client
	// requests based on the user-specified value.
	threads := config.LightServ * 4 / 100
	if threads < 4 {
		threads = 4
	}
	srv := &LesServer{
		lesCommons: lesCommons{
			genesis:          e.BlockChain().Genesis().Hash(),
			config:           config,
			chainConfig:      e.BlockChain().Config(),
			iConfig:          light.DefaultServerIndexerConfig,
			chainDb:          e.ChainDb(),
			peers:            newPeerSet(),
			chainReader:      e.BlockChain(),
			chtIndexer:       light.NewChtIndexer(e.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations),
			bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
			closeCh:          make(chan struct{}),
		},
		archiveMode:  e.ArchiveMode(),
		lesTopics:    lesTopics,
		fcManager:    flowcontrol.NewClientManager(nil, &mclock.System{}),
		servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100),
		threadsBusy:  config.LightServ/100 + 1,
		threadsIdle:  threads,
	}
	srv.handler = newServerHandler(srv, e.BlockChain(), e.ChainDb(), e.TxPool(), e.Synced)
	srv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config)
	// Free clients are served with the minimum assignable capacity.
	srv.freeCapacity = srv.minCapacity

	// Set up checkpoint oracle. Fall back to the hard-coded oracle for the
	// local genesis if the user did not configure one explicitly.
	oracle := config.CheckpointOracle
	if oracle == nil {
		oracle = params.CheckpointOracles[e.BlockChain().Genesis().Hash()]
	}
	srv.oracle = checkpointoracle.New(oracle, srv.localCheckpoint)

	// Initialize server capacity management fields.
	srv.defParams = flowcontrol.ServerParams{
		BufLimit:    srv.freeCapacity * bufLimitRatio,
		MinRecharge: srv.freeCapacity,
	}
	// LES flow control tries to more or less guarantee the possibility for the
	// clients to send a certain amount of requests at any time and get a quick
	// response. Most of the clients want this guarantee but don't actually need
	// to send requests most of the time. Our goal is to serve as many clients as
	// possible while the actually used server capacity does not exceed the limits
	totalRecharge := srv.costTracker.totalRecharge()
	srv.maxCapacity = srv.freeCapacity * uint64(srv.config.LightPeers)
	if totalRecharge > srv.maxCapacity {
		srv.maxCapacity = totalRecharge
	}
	srv.fcManager.SetCapacityLimits(srv.freeCapacity, srv.maxCapacity, srv.freeCapacity*2)
	// Evicted clients are unregistered asynchronously to avoid blocking the pool.
	srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, mclock.System{}, func(id enode.ID) { go srv.peers.Unregister(peerIdToString(id)) })
	srv.clientPool.setDefaultFactors(priceFactors{0, 1, 1}, priceFactors{0, 1, 1})

	checkpoint := srv.latestLocalCheckpoint()
	if !checkpoint.Empty() {
		log.Info("Loaded latest checkpoint", "section", checkpoint.SectionIndex, "head", checkpoint.SectionHead,
			"chtroot", checkpoint.CHTRoot, "bloomroot", checkpoint.BloomRoot)
	}
	srv.chtIndexer.Start(e.BlockChain())
	return srv, nil
}
   129  
   130  func (s *LesServer) APIs() []rpc.API {
   131  	return []rpc.API{
   132  		{
   133  			Namespace: "les",
   134  			Version:   "1.0",
   135  			Service:   NewPrivateLightAPI(&s.lesCommons),
   136  			Public:    false,
   137  		},
   138  		{
   139  			Namespace: "les",
   140  			Version:   "1.0",
   141  			Service:   NewPrivateLightServerAPI(s),
   142  			Public:    false,
   143  		},
   144  		{
   145  			Namespace: "debug",
   146  			Version:   "1.0",
   147  			Service:   NewPrivateDebugAPI(s),
   148  			Public:    false,
   149  		},
   150  	}
   151  }
   152  
   153  func (s *LesServer) Protocols() []p2p.Protocol {
   154  	ps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {
   155  		if p := s.peers.Peer(peerIdToString(id)); p != nil {
   156  			return p.Info()
   157  		}
   158  		return nil
   159  	})
   160  	// Add "les" ENR entries.
   161  	for i := range ps {
   162  		ps[i].Attributes = []enr.Entry{&lesEntry{}}
   163  	}
   164  	return ps
   165  }
   166  
   167  // Start starts the LES server
   168  func (s *LesServer) Start(srvr *p2p.Server) {
   169  	s.privateKey = srvr.PrivateKey
   170  	s.handler.start()
   171  
   172  	s.wg.Add(1)
   173  	go s.capacityManagement()
   174  
   175  	if srvr.DiscV5 != nil {
   176  		for _, topic := range s.lesTopics {
   177  			topic := topic
   178  			go func() {
   179  				logger := log.New("topic", topic)
   180  				logger.Info("Starting topic registration")
   181  				defer logger.Info("Terminated topic registration")
   182  
   183  				srvr.DiscV5.RegisterTopic(topic, s.closeCh)
   184  			}()
   185  		}
   186  	}
   187  }
   188  
   189  // Stop stops the LES service
// Stop stops the LES service
//
// Shutdown order matters here: peers are disconnected first, then the
// flow-control machinery, the handler, the client pool and the serving
// queue, and finally the indexer, before waiting for background
// goroutines (e.g. capacityManagement) to exit.
func (s *LesServer) Stop() {
	// Signal all goroutines selecting on closeCh to terminate.
	close(s.closeCh)

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// sessions which are already established but not added to pm.peers yet
	// will exit when they try to register.
	s.peers.Close()

	s.fcManager.Stop()
	s.costTracker.stop()
	s.handler.stop()
	s.clientPool.stop() // client pool should be closed after handler.
	s.servingQueue.stop()

	// Note, bloom trie indexer is closed by parent bloombits indexer.
	s.chtIndexer.Close()
	s.wg.Wait()
	log.Info("Les server stopped")
}
   210  
// SetBloomBitsIndexer attaches the server's bloom trie indexer as a child
// of the node's bloombits indexer, so it is driven (and closed) by the parent.
func (s *LesServer) SetBloomBitsIndexer(bloomIndexer *core.ChainIndexer) {
	bloomIndexer.AddChildIndexer(s.bloomTrieIndexer)
}
   214  
   215  // SetClient sets the rpc client and starts running checkpoint contract if it is not yet watched.
// SetContractBackend sets the contract backend and starts watching the
// checkpoint oracle contract if one is configured. It is a no-op when no
// oracle is set up.
func (s *LesServer) SetContractBackend(backend bind.ContractBackend) {
	if s.oracle == nil {
		return
	}
	s.oracle.Start(backend)
}
   222  
   223  // capacityManagement starts an event handler loop that updates the recharge curve of
   224  // the client manager and adjusts the client pool's size according to the total
   225  // capacity updates coming from the client manager
// capacityManagement starts an event handler loop that updates the recharge curve of
// the client manager and adjusts the client pool's size according to the total
// capacity updates coming from the client manager.
//
// It runs until closeCh is closed and signals s.wg on exit.
func (s *LesServer) capacityManagement() {
	defer s.wg.Done()

	// Block-processing events toggle the busy/idle serving profile.
	processCh := make(chan bool, 100)
	sub := s.handler.blockchain.SubscribeBlockProcessingEvent(processCh)
	defer sub.Unsubscribe()

	totalRechargeCh := make(chan uint64, 100)
	totalRecharge := s.costTracker.subscribeTotalRecharge(totalRechargeCh)

	totalCapacityCh := make(chan uint64, 100)
	totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh)
	s.clientPool.setLimits(s.config.LightPeers, totalCapacity)

	var (
		busy         bool            // true while a block is being inserted
		freePeers    uint64          // last computed number of free client slots
		blockProcess mclock.AbsTime  // start time of the current block insertion
	)
	// updateRecharge switches between the busy and idle serving profiles:
	// fewer threads and a linear recharge curve while inserting blocks,
	// more threads and a steeper (front-loaded) curve when idle.
	updateRecharge := func() {
		if busy {
			s.servingQueue.setThreads(s.threadsBusy)
			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge, totalRecharge}})
		} else {
			s.servingQueue.setThreads(s.threadsIdle)
			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge / 10, totalRecharge}, {totalRecharge, totalRecharge}})
		}
	}
	updateRecharge()

	for {
		select {
		case busy = <-processCh:
			// Track how long block processing takes for the metrics timer.
			if busy {
				blockProcess = mclock.Now()
			} else {
				blockProcessingTimer.Update(time.Duration(mclock.Now() - blockProcess))
			}
			updateRecharge()
		case totalRecharge = <-totalRechargeCh:
			totalRechargeGauge.Update(int64(totalRecharge))
			updateRecharge()
		case totalCapacity = <-totalCapacityCh:
			totalCapacityGauge.Update(int64(totalCapacity))
			// Recompute how many free clients the new capacity can support
			// and resize the client pool accordingly.
			newFreePeers := totalCapacity / s.freeCapacity
			if newFreePeers < freePeers && newFreePeers < uint64(s.config.LightPeers) {
				log.Warn("Reduced free peer connections", "from", freePeers, "to", newFreePeers)
			}
			freePeers = newFreePeers
			s.clientPool.setLimits(s.config.LightPeers, totalCapacity)
		case <-s.closeCh:
			return
		}
	}
}