github.com/nitinawathare/ethereumassignment3@v0.0.0-20211021213010-f07344c2b868/go-ethereum/les/api.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"context"
	"errors"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/rpc"
)

var (
	ErrMinCap               = errors.New("capacity too small")
	ErrTotalCap             = errors.New("total capacity exceeded")
	ErrUnknownBenchmarkType = errors.New("unknown benchmark type")

	dropCapacityDelay = time.Second // delay applied to decreasing capacity changes
)

// PrivateLightServerAPI provides an API to access the LES light server.
// Its methods can modify server state, such as the capacity assigned to
// individual clients, so it is intended for the node operator rather than
// for public exposure.
type PrivateLightServerAPI struct {
	server *LesServer
}

// NewPrivateLightServerAPI creates a new LES light server API.
func NewPrivateLightServerAPI(server *LesServer) *PrivateLightServerAPI {
	return &PrivateLightServerAPI{
		server: server,
	}
}

// TotalCapacity queries the total capacity available for all clients.
func (api *PrivateLightServerAPI) TotalCapacity() hexutil.Uint64 {
	return hexutil.Uint64(api.server.priorityClientPool.totalCapacity())
}

// SubscribeTotalCapacity subscribes to changed total capacity events.
// If onlyUnderrun is true, a notification is sent only when the total capacity
// drops below the sum of the capacities assigned to connected priority clients.
//
// Note: a decrease of the total capacity is applied with a delay, while the
// notification is sent immediately. This leaves time to lower the capacity of
// a priority client, or to choose which one to drop, before the system starts
// dropping some of them automatically.
func (api *PrivateLightServerAPI) SubscribeTotalCapacity(ctx context.Context, onlyUnderrun bool) (*rpc.Subscription, error) {
	notifier, supported := rpc.NotifierFromContext(ctx)
	if !supported {
		return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
	}
	rpcSub := notifier.CreateSubscription()
	api.server.priorityClientPool.subscribeTotalCapacity(&tcSubscription{notifier, rpcSub, onlyUnderrun})
	return rpcSub, nil
}
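
// exampleSubscribeTotalCapacity is an illustrative sketch, not part of the
// original API: it shows how a client might consume the subscription above
// over a WebSocket endpoint. The endpoint URL and the "les" namespace are
// assumptions here, not something this file defines.
func exampleSubscribeTotalCapacity() error {
	client, err := rpc.Dial("ws://127.0.0.1:8546") // hypothetical endpoint
	if err != nil {
		return err
	}
	defer client.Close()

	ch := make(chan uint64)
	// onlyUnderrun=true: only get notified when the total capacity drops
	// below the sum of the connected priority clients' capacities.
	sub, err := client.Subscribe(context.Background(), "les", ch, "subscribeTotalCapacity", true)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()

	for {
		select {
		case tc := <-ch:
			// The decrease is applied only after dropCapacityDelay, so there
			// is still time here to lower a priority client's capacity.
			_ = tc
		case err := <-sub.Err():
			return err
		}
	}
}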

type (
	// tcSubscription represents a total capacity subscription
	tcSubscription struct {
		notifier     *rpc.Notifier
		rpcSub       *rpc.Subscription
		onlyUnderrun bool
	}
	tcSubs map[*tcSubscription]struct{}
)

// send sends a changed total capacity event to the subscribers, dropping any
// subscriptions that have been closed in the meantime
func (s tcSubs) send(tc uint64, underrun bool) {
	for sub := range s {
		select {
		case <-sub.rpcSub.Err():
			delete(s, sub)
		case <-sub.notifier.Closed():
			delete(s, sub)
		default:
			if underrun || !sub.onlyUnderrun {
				sub.notifier.Notify(sub.rpcSub.ID, tc)
			}
		}
	}
}

// MinimumCapacity queries the minimum capacity that can be assigned to a single client
func (api *PrivateLightServerAPI) MinimumCapacity() hexutil.Uint64 {
	return hexutil.Uint64(minCapacity)
}

// FreeClientCapacity queries the capacity provided to free clients
func (api *PrivateLightServerAPI) FreeClientCapacity() hexutil.Uint64 {
	return hexutil.Uint64(api.server.freeClientCap)
}

// SetClientCapacity sets the priority capacity assigned to a given client.
// If the assigned capacity is greater than zero, a connection is always
// guaranteed. The sum of the capacities assigned to priority clients cannot
// exceed the total available capacity.
//
// Note: the assigned capacity can be changed while the client is connected,
// taking effect immediately.
func (api *PrivateLightServerAPI) SetClientCapacity(id enode.ID, cap uint64) error {
	if cap != 0 && cap < minCapacity {
		return ErrMinCap
	}
	return api.server.priorityClientPool.setClientCapacity(id, cap)
}
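
// exampleSetClientCapacity is an illustrative sketch, not part of the original
// API: it grants and then revokes priority capacity over RPC. The endpoint,
// the "les" namespace and the capacity value are assumptions.
func exampleSetClientCapacity(id enode.ID) error {
	client, err := rpc.Dial("http://127.0.0.1:8545") // hypothetical endpoint
	if err != nil {
		return err
	}
	defer client.Close()

	// Grant a priority slot; the value must be at least MinimumCapacity, and
	// the sum over all priority clients must stay within TotalCapacity.
	if err := client.Call(nil, "les_setClientCapacity", id, 1000000); err != nil {
		return err
	}
	// Setting the capacity back to zero demotes the client to the free pool.
	return client.Call(nil, "les_setClientCapacity", id, 0)
}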

// GetClientCapacity returns the capacity assigned to a given client
func (api *PrivateLightServerAPI) GetClientCapacity(id enode.ID) hexutil.Uint64 {
	api.server.priorityClientPool.lock.Lock()
	defer api.server.priorityClientPool.lock.Unlock()

	return hexutil.Uint64(api.server.priorityClientPool.clients[id].cap)
}

// clientPool is implemented by both the free and priority client pools
type clientPool interface {
	peerSetNotify
	setLimits(count int, totalCap uint64)
}

// priorityClientPool stores information about prioritized clients
type priorityClientPool struct {
	lock                             sync.Mutex
	child                            clientPool
	ps                               *peerSet
	clients                          map[enode.ID]priorityClientInfo
	totalCap, totalCapAnnounced      uint64
	totalConnectedCap, freeClientCap uint64
	maxPeers, priorityCount          int

	subs            tcSubs
	updateSchedule  []scheduledUpdate
	scheduleCounter uint64
}

// scheduledUpdate represents a delayed total capacity update
type scheduledUpdate struct {
	time         mclock.AbsTime
	totalCap, id uint64
}

// priorityClientInfo entries exist for all prioritized clients and currently connected non-priority clients
type priorityClientInfo struct {
	cap       uint64 // zero for non-priority clients
	connected bool
	peer      *peer
}

// newPriorityClientPool creates a new priority client pool
func newPriorityClientPool(freeClientCap uint64, ps *peerSet, child clientPool) *priorityClientPool {
	return &priorityClientPool{
		clients:       make(map[enode.ID]priorityClientInfo),
		subs:          make(tcSubs), // must be initialized: subscribeTotalCapacity writes into this map
		freeClientCap: freeClientCap,
		ps:            ps,
		child:         child,
	}
}
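
// exampleChildPool is an illustrative stub, not part of the original code: it
// satisfies the clientPool interface (assuming peerSetNotify consists of
// registerPeer and unregisterPeer) to show how the pools are chained. The
// priority pool forwards non-priority peers to its child and hands it whatever
// peer count and capacity the priority clients leave unused.
type exampleChildPool struct{}

func (exampleChildPool) registerPeer(p *peer)                 {}
func (exampleChildPool) unregisterPeer(p *peer)               {}
func (exampleChildPool) setLimits(count int, totalCap uint64) {}

// exampleChainPools wires a priority pool to the stub child; the limit values
// are arbitrary placeholders.
func exampleChainPools(ps *peerSet) *priorityClientPool {
	pool := newPriorityClientPool(1000000, ps, exampleChildPool{})
	pool.setLimits(100, 100000000) // the child receives the leftover share
	return pool
}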

// registerPeer is called when a new client is connected. If the client has no
// priority assigned, it is passed on to the child pool, which may either keep
// it or disconnect it.
//
// Note: priorityClientPool also keeps a record of free clients while they are
// connected, so that priority can be assigned to them later.
func (v *priorityClientPool) registerPeer(p *peer) {
	v.lock.Lock()
	defer v.lock.Unlock()

	id := p.ID()
	c := v.clients[id]
	if c.connected {
		return
	}
	if c.cap == 0 && v.child != nil {
		v.child.registerPeer(p)
	}
	if c.cap != 0 && v.totalConnectedCap+c.cap > v.totalCap {
		go v.ps.Unregister(p.id)
		return
	}

	c.connected = true
	c.peer = p
	v.clients[id] = c
	if c.cap != 0 {
		v.priorityCount++
		v.totalConnectedCap += c.cap
		if v.child != nil {
			v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap)
		}
		p.updateCapacity(c.cap)
	}
}

// unregisterPeer is called when a client is disconnected. If the client has no
// priority assigned, it is also removed from the child pool.
func (v *priorityClientPool) unregisterPeer(p *peer) {
	v.lock.Lock()
	defer v.lock.Unlock()

	id := p.ID()
	c := v.clients[id]
	if !c.connected {
		return
	}
	if c.cap != 0 {
		c.connected = false
		v.clients[id] = c
		v.priorityCount--
		v.totalConnectedCap -= c.cap
		if v.child != nil {
			v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap)
		}
	} else {
		if v.child != nil {
			v.child.unregisterPeer(p)
		}
		delete(v.clients, id)
	}
}

// setLimits updates the allowed peer count and total capacity of the priority
// client pool. Since the free client pool is a child of the priority pool, the
// remaining peer count and capacity are assigned to the free pool by calling
// its own setLimits function.
//
// Note: a decrease of the total capacity is applied with a delay.
func (v *priorityClientPool) setLimits(count int, totalCap uint64) {
	v.lock.Lock()
	defer v.lock.Unlock()

	v.totalCapAnnounced = totalCap
	if totalCap > v.totalCap {
		v.setLimitsNow(count, totalCap)
		v.subs.send(totalCap, false)
		return
	}
	v.setLimitsNow(count, v.totalCap)
	if totalCap < v.totalCap {
		v.subs.send(totalCap, totalCap < v.totalConnectedCap)
		for i, s := range v.updateSchedule {
			if totalCap >= s.totalCap {
				// the new target supersedes all later scheduled drops; write
				// through to the slice element, not the loop copy
				v.updateSchedule[i].totalCap = totalCap
				v.updateSchedule = v.updateSchedule[:i+1]
				return
			}
		}
		v.updateSchedule = append(v.updateSchedule, scheduledUpdate{time: mclock.Now() + mclock.AbsTime(dropCapacityDelay), totalCap: totalCap})
		if len(v.updateSchedule) == 1 {
			v.scheduleCounter++
			id := v.scheduleCounter
			v.updateSchedule[0].id = id
			time.AfterFunc(dropCapacityDelay, func() { v.checkUpdate(id) })
		}
	} else {
		v.updateSchedule = nil
	}
}
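
// exampleDelayedDecrease is a self-contained illustration, not part of the
// original code, of the pattern setLimits implements above: increases take
// effect immediately, while decreases are deferred by dropCapacityDelay and
// superseded if a newer value arrives first. The real code keeps a queue of
// pending drops; this sketch keeps only the latest one.
type exampleDelayedDecrease struct {
	mu      sync.Mutex
	current uint64
	seq     uint64 // invalidates previously scheduled drops
}

func (d *exampleDelayedDecrease) set(target uint64) {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Bumping seq cancels any pending drop, mirroring how setLimits clears
	// updateSchedule when the capacity rises again.
	d.seq++
	if target >= d.current {
		d.current = target // increases are applied instantly
		return
	}
	id := d.seq
	time.AfterFunc(dropCapacityDelay, func() {
		d.mu.Lock()
		defer d.mu.Unlock()
		if d.seq == id { // apply only if no newer set() superseded this one
			d.current = target
		}
	})
}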

// checkUpdate performs the next scheduled update if possible and schedules
// the one after that
func (v *priorityClientPool) checkUpdate(id uint64) {
	v.lock.Lock()
	defer v.lock.Unlock()

	if len(v.updateSchedule) == 0 || v.updateSchedule[0].id != id {
		return
	}
	v.setLimitsNow(v.maxPeers, v.updateSchedule[0].totalCap)
	v.updateSchedule = v.updateSchedule[1:]
	if len(v.updateSchedule) != 0 {
		v.scheduleCounter++
		id := v.scheduleCounter
		v.updateSchedule[0].id = id
		dt := time.Duration(v.updateSchedule[0].time - mclock.Now())
		time.AfterFunc(dt, func() { v.checkUpdate(id) })
	}
}

// setLimitsNow updates the allowed peer count and total capacity immediately
func (v *priorityClientPool) setLimitsNow(count int, totalCap uint64) {
	if v.priorityCount > count || v.totalConnectedCap > totalCap {
		for id, c := range v.clients {
			// only drop priority clients; free clients are managed by the
			// child pool and do not count towards the priority limits
			if c.connected && c.cap != 0 {
				c.connected = false
				v.totalConnectedCap -= c.cap
				v.priorityCount--
				v.clients[id] = c
				go v.ps.Unregister(c.peer.id)
				if v.priorityCount <= count && v.totalConnectedCap <= totalCap {
					break
				}
			}
		}
	}
	v.maxPeers = count
	v.totalCap = totalCap
	if v.child != nil {
		v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap)
	}
}

// totalCapacity queries total available capacity for all clients
func (v *priorityClientPool) totalCapacity() uint64 {
	v.lock.Lock()
	defer v.lock.Unlock()

	return v.totalCapAnnounced
}

// subscribeTotalCapacity subscribes to changed total capacity events
func (v *priorityClientPool) subscribeTotalCapacity(sub *tcSubscription) {
	v.lock.Lock()
	defer v.lock.Unlock()

	v.subs[sub] = struct{}{}
}

// setClientCapacity sets the priority capacity assigned to a given client
func (v *priorityClientPool) setClientCapacity(id enode.ID, cap uint64) error {
	v.lock.Lock()
	defer v.lock.Unlock()

	c := v.clients[id]
	if c.cap == cap {
		return nil
	}
	if c.connected {
		if v.totalConnectedCap+cap > v.totalCap+c.cap {
			return ErrTotalCap
		}
		if c.cap == 0 {
			// the client is promoted out of the child pool
			if v.child != nil {
				v.child.unregisterPeer(c.peer)
			}
			v.priorityCount++
		}
		if cap == 0 {
			v.priorityCount--
		}
		v.totalConnectedCap += cap - c.cap
		if v.child != nil {
			v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap)
		}
		if cap == 0 {
			// the client is demoted back to the child pool with free capacity
			if v.child != nil {
				v.child.registerPeer(c.peer)
			}
			c.peer.updateCapacity(v.freeClientCap)
		} else {
			c.peer.updateCapacity(cap)
		}
	}
	if cap != 0 || c.connected {
		c.cap = cap
		v.clients[id] = c
	} else {
		delete(v.clients, id)
	}
	return nil
}

// Benchmark runs a request performance benchmark with a given set of measurement
// setups in multiple passes specified by passCount. The measurement time for each
// setup in each pass is specified in milliseconds by length.
//
// Note: the measurement time is adjusted for each pass depending on the previous
// ones, so the total measurement time stays controllable over multiple passes.
func (api *PrivateLightServerAPI) Benchmark(setups []map[string]interface{}, passCount, length int) ([]map[string]interface{}, error) {
	benchmarks := make([]requestBenchmark, len(setups))
	for i, setup := range setups {
		if t, ok := setup["type"].(string); ok {
			getInt := func(field string, def int) int {
				if value, ok := setup[field].(float64); ok {
					return int(value)
				}
				return def
			}
			getBool := func(field string, def bool) bool {
				if value, ok := setup[field].(bool); ok {
					return value
				}
				return def
			}
			switch t {
			case "header":
				benchmarks[i] = &benchmarkBlockHeaders{
					amount:  getInt("amount", 1),
					skip:    getInt("skip", 1),
					byHash:  getBool("byHash", false),
					reverse: getBool("reverse", false),
				}
			case "body":
				benchmarks[i] = &benchmarkBodiesOrReceipts{receipts: false}
			case "receipts":
				benchmarks[i] = &benchmarkBodiesOrReceipts{receipts: true}
			case "proof":
				benchmarks[i] = &benchmarkProofsOrCode{code: false}
			case "code":
				benchmarks[i] = &benchmarkProofsOrCode{code: true}
			case "cht":
				benchmarks[i] = &benchmarkHelperTrie{
					bloom:    false,
					reqCount: getInt("amount", 1),
				}
			case "bloom":
				benchmarks[i] = &benchmarkHelperTrie{
					bloom:    true,
					reqCount: getInt("amount", 1),
				}
			case "txSend":
				benchmarks[i] = &benchmarkTxSend{}
			case "txStatus":
				benchmarks[i] = &benchmarkTxStatus{}
			default:
				return nil, ErrUnknownBenchmarkType
			}
		} else {
			return nil, ErrUnknownBenchmarkType
		}
	}
	rs := api.server.protocolManager.runBenchmark(benchmarks, passCount, time.Millisecond*time.Duration(length))
	result := make([]map[string]interface{}, len(setups))
	for i, r := range rs {
		res := make(map[string]interface{})
		if r.err == nil {
			res["totalCount"] = r.totalCount
			res["avgTime"] = r.avgTime
			res["maxInSize"] = r.maxInSize
			res["maxOutSize"] = r.maxOutSize
		} else {
			res["error"] = r.err.Error()
		}
		result[i] = res
	}
	return result, nil
}
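
// exampleRunBenchmark is an illustrative sketch, not part of the original API:
// it invokes Benchmark over RPC with two setups, three passes and 1000 ms of
// measurement time per setup per pass. The endpoint and the "les" namespace
// are assumptions.
func exampleRunBenchmark() ([]map[string]interface{}, error) {
	client, err := rpc.Dial("http://127.0.0.1:8545") // hypothetical endpoint
	if err != nil {
		return nil, err
	}
	defer client.Close()

	setups := []map[string]interface{}{
		{"type": "header", "amount": 192, "reverse": true},
		{"type": "txStatus"},
	}
	var result []map[string]interface{}
	err = client.Call(&result, "les_benchmark", setups, 3, 1000)
	return result, err
}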