github.com/daeglee/go-ethereum@v0.0.0-20190504220456-cad3e8d18e9b/les/server.go (about)

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package les
    18  
    19  import (
    20  	"crypto/ecdsa"
    21  	"sync"
    22  
    23  	"github.com/ethereum/go-ethereum/common"
    24  	"github.com/ethereum/go-ethereum/common/mclock"
    25  	"github.com/ethereum/go-ethereum/core"
    26  	"github.com/ethereum/go-ethereum/core/rawdb"
    27  	"github.com/ethereum/go-ethereum/core/types"
    28  	"github.com/ethereum/go-ethereum/eth"
    29  	"github.com/ethereum/go-ethereum/les/flowcontrol"
    30  	"github.com/ethereum/go-ethereum/light"
    31  	"github.com/ethereum/go-ethereum/log"
    32  	"github.com/ethereum/go-ethereum/p2p"
    33  	"github.com/ethereum/go-ethereum/p2p/discv5"
    34  	"github.com/ethereum/go-ethereum/params"
    35  	"github.com/ethereum/go-ethereum/rpc"
    36  )
    37  
    38  const bufLimitRatio = 6000 // fixed bufLimit/MRR ratio
    39  
// LesServer is the server side of the Light Ethereum Subprotocol (LES): it
// serves headers, proofs and announcements to light clients on top of a full
// eth node. It embeds lesCommons for the state shared with the client side.
type LesServer struct {
	lesCommons

	fcManager    *flowcontrol.ClientManager // nil if our node is client only
	costTracker  *costTracker               // tracks request serving cost and total recharge capacity
	defParams    flowcontrol.ServerParams   // default flow control params handed to free clients (set in Start)
	lesTopics    []discv5.Topic             // discovery topics advertised, one per advertised protocol version
	privateKey   *ecdsa.PrivateKey          // node key, used to sign announcements for announceTypeSigned peers
	quitSync     chan struct{}              // closed to stop the sync/announce goroutines
	onlyAnnounce bool                       // if true, only announce heads and do not serve data requests

	thcNormal, thcBlockProcessing int // serving thread count for normal operation and block processing mode

	maxPeers           int     // maximum number of light peers (from config.LightPeers; set in Start)
	freeClientCap      uint64  // flow control capacity granted to each free client
	freeClientPool     *freeClientPool     // pool managing non-priority client connections
	priorityClientPool *priorityClientPool // pool managing clients with assigned priority capacity
}
    58  
// NewLesServer creates a LES server running on top of the given full eth node.
// It wires up the protocol manager in server mode, registers the discovery
// topics for all advertised protocol versions, initializes the cost tracker
// and serving-thread limits, logs the latest indexed CHT/bloom trie sections
// and starts the CHT indexer. The bloom trie indexer is NOT started here; it
// is attached to the parent bloombits indexer via SetBloomBitsIndexer.
func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
	quitSync := make(chan struct{})
	// Server mode protocol manager: no downloader/fetcher/odr (the three nil
	// arguments), serving requests instead of making them.
	pm, err := NewProtocolManager(
		eth.BlockChain().Config(),
		light.DefaultServerIndexerConfig,
		false,
		config.NetworkId,
		eth.EventMux(),
		eth.Engine(),
		newPeerSet(),
		eth.BlockChain(),
		eth.TxPool(),
		eth.ChainDb(),
		nil,
		nil,
		nil,
		quitSync,
		new(sync.WaitGroup),
		config.ULC)
	if err != nil {
		return nil, err
	}

	// One discovery topic per advertised protocol version, derived from the
	// genesis hash so clients find servers on the same chain.
	lesTopics := make([]discv5.Topic, len(AdvertiseProtocolVersions))
	for i, pv := range AdvertiseProtocolVersions {
		lesTopics[i] = lesTopic(eth.BlockChain().Genesis().Hash(), pv)
	}

	srv := &LesServer{
		lesCommons: lesCommons{
			config:           config,
			chainDb:          eth.ChainDb(),
			iConfig:          light.DefaultServerIndexerConfig,
			chtIndexer:       light.NewChtIndexer(eth.ChainDb(), nil, params.CHTFrequencyServer, params.HelperTrieProcessConfirmations),
			bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
			protocolManager:  pm,
		},
		costTracker:  newCostTracker(eth.ChainDb(), config),
		quitSync:     quitSync,
		lesTopics:    lesTopics,
		onlyAnnounce: config.OnlyAnnounce,
	}

	logger := log.New()
	pm.server = srv
	// Serving thread counts are derived from the LightServ percentage:
	// 4% of it (minimum 4 threads) during normal operation, and a reduced
	// count while the node itself is processing a block.
	srv.thcNormal = config.LightServ * 4 / 100
	if srv.thcNormal < 4 {
		srv.thcNormal = 4
	}
	srv.thcBlockProcessing = config.LightServ/100 + 1
	srv.fcManager = flowcontrol.NewClientManager(nil, &mclock.System{})

	chtV1SectionCount, _, _ := srv.chtIndexer.Sections() // indexer still uses LES/1 4k section size for backwards server compatibility
	chtV2SectionCount := chtV1SectionCount / (params.CHTFrequencyClient / params.CHTFrequencyServer)
	if chtV2SectionCount != 0 {
		// convert to LES/2 section
		chtLastSection := chtV2SectionCount - 1
		// convert last LES/2 section index back to LES/1 index for chtIndexer.SectionHead
		chtLastSectionV1 := (chtLastSection+1)*(params.CHTFrequencyClient/params.CHTFrequencyServer) - 1
		chtSectionHead := srv.chtIndexer.SectionHead(chtLastSectionV1)
		chtRoot := light.GetChtRoot(pm.chainDb, chtLastSectionV1, chtSectionHead)
		logger.Info("Loaded CHT", "section", chtLastSection, "head", chtSectionHead, "root", chtRoot)
	}
	bloomTrieSectionCount, _, _ := srv.bloomTrieIndexer.Sections()
	if bloomTrieSectionCount != 0 {
		bloomTrieLastSection := bloomTrieSectionCount - 1
		bloomTrieSectionHead := srv.bloomTrieIndexer.SectionHead(bloomTrieLastSection)
		bloomTrieRoot := light.GetBloomTrieRoot(pm.chainDb, bloomTrieLastSection, bloomTrieSectionHead)
		logger.Info("Loaded bloom trie", "section", bloomTrieLastSection, "head", bloomTrieSectionHead, "root", bloomTrieRoot)
	}

	srv.chtIndexer.Start(eth.BlockChain())
	return srv, nil
}
   133  
   134  func (s *LesServer) APIs() []rpc.API {
   135  	return []rpc.API{
   136  		{
   137  			Namespace: "les",
   138  			Version:   "1.0",
   139  			Service:   NewPrivateLightServerAPI(s),
   140  			Public:    false,
   141  		},
   142  	}
   143  }
   144  
// startEventLoop starts an event handler loop that updates the recharge curve of
// the client manager and adjusts the client pool's size according to the total
// capacity updates coming from the client manager
func (s *LesServer) startEventLoop() {
	s.protocolManager.wg.Add(1)

	var processing bool
	// Buffered channels so the publishers never block on this loop.
	blockProcFeed := make(chan bool, 100)
	s.protocolManager.blockchain.(*core.BlockChain).SubscribeBlockProcessingEvent(blockProcFeed)
	totalRechargeCh := make(chan uint64, 100)
	totalRecharge := s.costTracker.subscribeTotalRecharge(totalRechargeCh)
	totalCapacityCh := make(chan uint64, 100)
	// updateRecharge applies the serving-thread count and recharge curve
	// appropriate for the current mode: while a block is being processed,
	// fewer threads serve and recharge is linear; otherwise the curve lets
	// clients recharge faster at low total demand.
	updateRecharge := func() {
		if processing {
			s.protocolManager.servingQueue.setThreads(s.thcBlockProcessing)
			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge, totalRecharge}})
		} else {
			s.protocolManager.servingQueue.setThreads(s.thcNormal)
			s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge / 10, totalRecharge}, {totalRecharge, totalRecharge}})
		}
	}
	// Apply the initial curve before subscribing to capacity updates so the
	// first reported total capacity reflects a configured manager.
	updateRecharge()
	totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh)
	s.priorityClientPool.setLimits(s.maxPeers, totalCapacity)

	go func() {
		for {
			select {
			case processing = <-blockProcFeed:
				updateRecharge()
			case totalRecharge = <-totalRechargeCh:
				updateRecharge()
			case totalCapacity = <-totalCapacityCh:
				s.priorityClientPool.setLimits(s.maxPeers, totalCapacity)
			case <-s.protocolManager.quitSync:
				s.protocolManager.wg.Done()
				return
			}
		}
	}()
}
   186  
   187  func (s *LesServer) Protocols() []p2p.Protocol {
   188  	return s.makeProtocols(ServerProtocolVersions)
   189  }
   190  
   191  // Start starts the LES server
   192  func (s *LesServer) Start(srvr *p2p.Server) {
   193  	s.maxPeers = s.config.LightPeers
   194  	totalRecharge := s.costTracker.totalRecharge()
   195  	if s.maxPeers > 0 {
   196  		s.freeClientCap = minCapacity //totalRecharge / uint64(s.maxPeers)
   197  		if s.freeClientCap < minCapacity {
   198  			s.freeClientCap = minCapacity
   199  		}
   200  		if s.freeClientCap > 0 {
   201  			s.defParams = flowcontrol.ServerParams{
   202  				BufLimit:    s.freeClientCap * bufLimitRatio,
   203  				MinRecharge: s.freeClientCap,
   204  			}
   205  		}
   206  	}
   207  	freePeers := int(totalRecharge / s.freeClientCap)
   208  	if freePeers < s.maxPeers {
   209  		log.Warn("Light peer count limited", "specified", s.maxPeers, "allowed", freePeers)
   210  	}
   211  
   212  	s.freeClientPool = newFreeClientPool(s.chainDb, s.freeClientCap, 10000, mclock.System{}, func(id string) { go s.protocolManager.removePeer(id) })
   213  	s.priorityClientPool = newPriorityClientPool(s.freeClientCap, s.protocolManager.peers, s.freeClientPool)
   214  
   215  	s.protocolManager.peers.notify(s.priorityClientPool)
   216  	s.startEventLoop()
   217  	s.protocolManager.Start(s.config.LightPeers)
   218  	if srvr.DiscV5 != nil {
   219  		for _, topic := range s.lesTopics {
   220  			topic := topic
   221  			go func() {
   222  				logger := log.New("topic", topic)
   223  				logger.Info("Starting topic registration")
   224  				defer logger.Info("Terminated topic registration")
   225  
   226  				srvr.DiscV5.RegisterTopic(topic, s.quitSync)
   227  			}()
   228  		}
   229  	}
   230  	s.privateKey = srvr.PrivateKey
   231  	s.protocolManager.blockLoop()
   232  }
   233  
   234  func (s *LesServer) SetBloomBitsIndexer(bloomIndexer *core.ChainIndexer) {
   235  	bloomIndexer.AddChildIndexer(s.bloomTrieIndexer)
   236  }
   237  
// Stop stops the LES service
func (s *LesServer) Stop() {
	s.chtIndexer.Close()
	// bloom trie indexer is closed by parent bloombits indexer
	// Drain the noMorePeers signal in the background so protocolManager.Stop
	// below does not block on its send.
	go func() {
		<-s.protocolManager.noMorePeers
	}()
	s.freeClientPool.stop()
	s.costTracker.stop()
	s.protocolManager.Stop()
}
   249  
   250  func (pm *ProtocolManager) blockLoop() {
   251  	pm.wg.Add(1)
   252  	headCh := make(chan core.ChainHeadEvent, 10)
   253  	headSub := pm.blockchain.SubscribeChainHeadEvent(headCh)
   254  	go func() {
   255  		var lastHead *types.Header
   256  		lastBroadcastTd := common.Big0
   257  		for {
   258  			select {
   259  			case ev := <-headCh:
   260  				peers := pm.peers.AllPeers()
   261  				if len(peers) > 0 {
   262  					header := ev.Block.Header()
   263  					hash := header.Hash()
   264  					number := header.Number.Uint64()
   265  					td := rawdb.ReadTd(pm.chainDb, hash, number)
   266  					if td != nil && td.Cmp(lastBroadcastTd) > 0 {
   267  						var reorg uint64
   268  						if lastHead != nil {
   269  							reorg = lastHead.Number.Uint64() - rawdb.FindCommonAncestor(pm.chainDb, header, lastHead).Number.Uint64()
   270  						}
   271  						lastHead = header
   272  						lastBroadcastTd = td
   273  
   274  						log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg)
   275  
   276  						announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}
   277  						var (
   278  							signed         bool
   279  							signedAnnounce announceData
   280  						)
   281  
   282  						for _, p := range peers {
   283  							p := p
   284  							switch p.announceType {
   285  							case announceTypeSimple:
   286  								p.queueSend(func() { p.SendAnnounce(announce) })
   287  							case announceTypeSigned:
   288  								if !signed {
   289  									signedAnnounce = announce
   290  									signedAnnounce.sign(pm.server.privateKey)
   291  									signed = true
   292  								}
   293  								p.queueSend(func() { p.SendAnnounce(signedAnnounce) })
   294  							}
   295  						}
   296  					}
   297  				}
   298  			case <-pm.quitSync:
   299  				headSub.Unsubscribe()
   300  				pm.wg.Done()
   301  				return
   302  			}
   303  		}
   304  	}()
   305  }