github.com/theQRL/go-zond@v0.1.1/les/vflux/server/prioritypool.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"math"
	"sync"
	"time"

	"github.com/theQRL/go-zond/common/mclock"
	"github.com/theQRL/go-zond/common/prque"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/p2p/enode"
	"github.com/theQRL/go-zond/p2p/nodestate"
)

const (
	lazyQueueRefresh = time.Second * 10 // refresh period of the active queue
)

// priorityPool handles a set of nodes where each node has a capacity (a scalar value)
// and a priority (which can change over time and can also depend on the capacity).
// A node is active if it has at least the necessary minimal amount of capacity, while
// inactive nodes have 0 capacity (values between 0 and the minimum are not allowed).
// The pool ensures that the number and total capacity of all active nodes are limited
// and the highest priority nodes are active at all times (limits can be changed
// during operation with immediate effect).
//
// When activating clients, a priority bias is applied in favor of the already active
// nodes in order to avoid nodes quickly alternating between active and inactive states
// when their priorities are close to each other. The bias is specified in terms of
// duration (time) because priorities are expected to usually decrease over time and
// therefore a future minimum prediction (see estimatePriority) should decrease
// monotonically with the specified time parameter.
// This time bias can be interpreted as the minimum expected active time at the given
// capacity (if the threshold priority stays the same).
//
// Nodes in the pool always have either inactiveFlag or activeFlag set. A new node is
// added to the pool by externally setting inactiveFlag. priorityPool can switch a node
// between inactiveFlag and activeFlag at any time. Nodes can be removed from the pool
// by externally resetting both flags. activeFlag should not be set externally.
//
// The highest priority nodes in "inactive" state are moved to "active" state as soon as
// the minimum capacity can be granted to them. The capacity of lower priority active
// nodes is reduced or they are demoted to "inactive" state if their priority is
// insufficient even at minimal capacity.
type priorityPool struct {
	setup                        *serverSetup
	ns                           *nodestate.NodeStateMachine
	clock                        mclock.Clock
	lock                         sync.Mutex
	maxCount, maxCap             uint64
	minCap                       uint64
	activeBias                   time.Duration
	capacityStepDiv, fineStepDiv uint64

	// Cached snapshot of the priority pool, used to answer capacity queries.
	cachedCurve    *capacityCurve
	ccUpdatedAt    mclock.AbsTime
	ccUpdateForced bool

	// Runtime status of the priority pool; the fields below describe the
	// temporary state while tempState is not empty.
	tempState              []*ppNodeInfo
	activeCount, activeCap uint64
	activeQueue            *prque.LazyQueue[int64, *ppNodeInfo]
	inactiveQueue          *prque.Prque[int64, *ppNodeInfo]
}

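// The expected node lifecycle, as wired up by the subscriptions in
// newPriorityPool below (a sketch; np stands for whatever nodePriority
// implementation the caller stores in balanceField):
//
//	ns.SetField(node, setup.balanceField, np)  // node enters the pool with inactiveFlag set
//	// ... the pool may now switch the node between inactiveFlag and activeFlag ...
//	ns.SetField(node, setup.balanceField, nil) // node leaves the pool; flags and pool fields are cleared
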
// ppNodeInfo is the internal node descriptor of priorityPool
type ppNodeInfo struct {
	nodePriority               nodePriority
	node                       *enode.Node
	connected                  bool
	capacity                   uint64 // only changed when temporary state is committed
	activeIndex, inactiveIndex int

	tempState    bool   // should only be true while the priorityPool lock is held
	tempCapacity uint64 // equals capacity when tempState is false

	// the following fields only affect the temporary state; they are reset to their
	// default values when leaving the temp state
	minTarget, stepDiv uint64
	bias               time.Duration
}

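// Capacity changes follow a common temporary-state pattern (see setTempState,
// setTempCapacity and finalizeChanges below): a node is first put into the
// temporary state, its tempCapacity is adjusted while enforceLimits keeps the
// pool within its limits, and finalizeChanges then either commits the new
// capacities or reverts them. The resulting capUpdate list is applied by
// updateFlags after the pool mutex has been released.
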
// newPriorityPool creates a new priorityPool
func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv, fineStepDiv uint64) *priorityPool {
	pp := &priorityPool{
		setup:           setup,
		ns:              ns,
		clock:           clock,
		inactiveQueue:   prque.New[int64, *ppNodeInfo](inactiveSetIndex),
		minCap:          minCap,
		activeBias:      activeBias,
		capacityStepDiv: capacityStepDiv,
		fineStepDiv:     fineStepDiv,
	}
	if pp.activeBias < time.Duration(1) {
		pp.activeBias = time.Duration(1)
	}
	pp.activeQueue = prque.NewLazyQueue(activeSetIndex, activePriority, pp.activeMaxPriority, clock, lazyQueueRefresh)

	ns.SubscribeField(pp.setup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
		if newValue != nil {
			c := &ppNodeInfo{
				node:          node,
				nodePriority:  newValue.(nodePriority),
				activeIndex:   -1,
				inactiveIndex: -1,
			}
			ns.SetFieldSub(node, pp.setup.queueField, c)
			ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0)
		} else {
			ns.SetStateSub(node, nodestate.Flags{}, pp.setup.activeFlag.Or(pp.setup.inactiveFlag), 0)
			if n, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); n != nil {
				pp.disconnectNode(n)
			}
			ns.SetFieldSub(node, pp.setup.capacityField, nil)
			ns.SetFieldSub(node, pp.setup.queueField, nil)
		}
	})
	ns.SubscribeState(pp.setup.activeFlag.Or(pp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); c != nil {
			if oldState.IsEmpty() {
				pp.connectNode(c)
			}
			if newState.IsEmpty() {
				pp.disconnectNode(c)
			}
		}
	})
	ns.SubscribeState(pp.setup.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
		if !newState.IsEmpty() {
			pp.updatePriority(node)
		}
	})
	return pp
}

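// A minimal construction sketch (illustrative values only; the real caller is
// expected to wire the pool into a serverSetup elsewhere):
//
//	pp := newPriorityPool(ns, setup, mclock.System{}, 100, time.Minute, 4, 4)
//	pp.SetLimits(10, 1000) // at most 10 active nodes with a total capacity of 1000
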
// requestCapacity tries to set the capacity of a connected node to the highest possible
// value inside the given target range. If maxTarget is not reachable then the capacity is
// iteratively reduced in fine steps based on the fineStepDiv parameter until minTarget is reached.
// The function returns the new capacity if successful and the original capacity otherwise.
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) requestCapacity(node *enode.Node, minTarget, maxTarget uint64, bias time.Duration) uint64 {
	pp.lock.Lock()
	pp.activeQueue.Refresh()

	if minTarget < pp.minCap {
		minTarget = pp.minCap
	}
	if maxTarget < minTarget {
		maxTarget = minTarget
	}
	if bias < pp.activeBias {
		bias = pp.activeBias
	}
	c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo)
	if c == nil {
		log.Error("requestCapacity called for unknown node", "id", node.ID())
		pp.lock.Unlock()
		return 0
	}
	pp.setTempState(c)
	if maxTarget > c.capacity {
		pp.setTempStepDiv(c, pp.fineStepDiv)
		pp.setTempBias(c, bias)
	}
	pp.setTempCapacity(c, maxTarget)
	c.minTarget = minTarget
	pp.removeFromQueues(c)
	pp.activeQueue.Push(c)
	pp.enforceLimits()
	updates := pp.finalizeChanges(c.tempCapacity >= minTarget && c.tempCapacity <= maxTarget && c.tempCapacity != c.capacity)
	pp.lock.Unlock()
	pp.updateFlags(updates)
	return c.capacity
}

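// Example call (a sketch; like all capacity changes it must run inside a
// NodeStateMachine operation):
//
//	pp.ns.Operation(func() {
//		granted := pp.requestCapacity(node, pp.minCap, maxTarget, 0)
//		// granted is the node's new capacity if the request succeeded,
//		// or its previous capacity otherwise
//	})
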
// SetLimits sets the maximum number and total capacity of simultaneously active nodes
func (pp *priorityPool) SetLimits(maxCount, maxCap uint64) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	inc := (maxCount > pp.maxCount) || (maxCap > pp.maxCap)
	dec := (maxCount < pp.maxCount) || (maxCap < pp.maxCap)
	pp.maxCount, pp.maxCap = maxCount, maxCap

	var updates []capUpdate
	if dec {
		pp.enforceLimits()
		updates = pp.finalizeChanges(true)
	}
	if inc {
		updates = append(updates, pp.tryActivate(false)...)
	}
	pp.lock.Unlock()
	pp.ns.Operation(func() { pp.updateFlags(updates) })
}

// setActiveBias sets the bias applied when trying to activate inactive nodes
func (pp *priorityPool) setActiveBias(bias time.Duration) {
	pp.lock.Lock()
	pp.activeBias = bias
	if pp.activeBias < time.Duration(1) {
		pp.activeBias = time.Duration(1)
	}
	updates := pp.tryActivate(false)
	pp.lock.Unlock()
	pp.ns.Operation(func() { pp.updateFlags(updates) })
}

// Active returns the number and total capacity of currently active nodes
func (pp *priorityPool) Active() (uint64, uint64) {
	pp.lock.Lock()
	defer pp.lock.Unlock()

	return pp.activeCount, pp.activeCap
}

// Inactive returns the number of currently inactive nodes
func (pp *priorityPool) Inactive() int {
	pp.lock.Lock()
	defer pp.lock.Unlock()

	return pp.inactiveQueue.Size()
}

// Limits returns the maximum allowed number and total capacity of active nodes
func (pp *priorityPool) Limits() (uint64, uint64) {
	pp.lock.Lock()
	defer pp.lock.Unlock()

	return pp.maxCount, pp.maxCap
}

// inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue
func inactiveSetIndex(a *ppNodeInfo, index int) {
	a.inactiveIndex = index
}

// activeSetIndex callback updates ppNodeInfo item index in activeQueue
func activeSetIndex(a *ppNodeInfo, index int) {
	a.activeIndex = index
}

// invertPriority inverts a priority value. The active queue uses inverted priorities
// because the node on the top is the first to be deactivated.
func invertPriority(p int64) int64 {
	if p == math.MinInt64 {
		return math.MaxInt64
	}
	return -p
}

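// For example, invertPriority(5) == -5, so the node with the lowest actual
// priority ends up on top of the active queue. math.MinInt64 is mapped to
// math.MaxInt64 explicitly because negating math.MinInt64 would overflow int64.
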
// activePriority callback returns actual priority of ppNodeInfo item in activeQueue
func activePriority(c *ppNodeInfo) int64 {
	if c.bias == 0 {
		return invertPriority(c.nodePriority.priority(c.tempCapacity))
	} else {
		return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, 0, c.bias, true))
	}
}

// activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue
func (pp *priorityPool) activeMaxPriority(c *ppNodeInfo, until mclock.AbsTime) int64 {
	future := time.Duration(until - pp.clock.Now())
	if future < 0 {
		future = 0
	}
	return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, future, c.bias, false))
}

// inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue
func (pp *priorityPool) inactivePriority(p *ppNodeInfo) int64 {
	return p.nodePriority.priority(pp.minCap)
}

// removeFromQueues removes the node from the active/inactive queues
func (pp *priorityPool) removeFromQueues(c *ppNodeInfo) {
	if c.activeIndex >= 0 {
		pp.activeQueue.Remove(c.activeIndex)
	}
	if c.inactiveIndex >= 0 {
		pp.inactiveQueue.Remove(c.inactiveIndex)
	}
}

// connectNode is called when a new node has been added to the pool (inactiveFlag set)
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) connectNode(c *ppNodeInfo) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	if c.connected {
		pp.lock.Unlock()
		return
	}
	c.connected = true
	pp.inactiveQueue.Push(c, pp.inactivePriority(c))
	updates := pp.tryActivate(false)
	pp.lock.Unlock()
	pp.updateFlags(updates)
}

// disconnectNode is called when a node has been removed from the pool (both inactiveFlag
// and activeFlag reset)
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) disconnectNode(c *ppNodeInfo) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	if !c.connected {
		pp.lock.Unlock()
		return
	}
	c.connected = false
	pp.removeFromQueues(c)

	var updates []capUpdate
	if c.capacity != 0 {
		pp.setTempState(c)
		pp.setTempCapacity(c, 0)
		updates = pp.tryActivate(true)
	}
	pp.lock.Unlock()
	pp.updateFlags(updates)
}

// setTempState internally puts a node in a temporary state that can either be reverted
// or confirmed later. This temporary state allows changing the capacity of a node and
// moving it between the active and inactive queue. activeFlag/inactiveFlag and
// capacityField are not changed while the changes are still temporary.
func (pp *priorityPool) setTempState(c *ppNodeInfo) {
	if c.tempState {
		return
	}
	c.tempState = true
	if c.tempCapacity != c.capacity { // should never happen
		log.Error("tempCapacity != capacity when entering tempState")
	}
	// Assign all the defaults to the temp state.
	c.minTarget = pp.minCap
	c.stepDiv = pp.capacityStepDiv
	c.bias = 0
	pp.tempState = append(pp.tempState, c)
}

// unsetTempState revokes the node's temporary status and resets all internal
// fields to their default values.
func (pp *priorityPool) unsetTempState(c *ppNodeInfo) {
	if !c.tempState {
		return
	}
	c.tempState = false
	if c.tempCapacity != c.capacity { // should never happen
		log.Error("tempCapacity != capacity when leaving tempState")
	}
	c.minTarget = pp.minCap
	c.stepDiv = pp.capacityStepDiv
	c.bias = 0
}

// setTempCapacity changes the capacity of a node in the temporary state and adjusts
// activeCap and activeCount accordingly. Since this change is performed in the temporary
// state it should be called after setTempState and before finalizeChanges.
func (pp *priorityPool) setTempCapacity(c *ppNodeInfo, cap uint64) {
	if !c.tempState { // should never happen
		log.Error("Node is not in temporary state")
		return
	}
	pp.activeCap += cap - c.tempCapacity
	if c.tempCapacity == 0 {
		pp.activeCount++
	}
	if cap == 0 {
		pp.activeCount--
	}
	c.tempCapacity = cap
}

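// For example, activating a previously inactive node via
// setTempCapacity(c, pp.minCap) increases activeCount by one and activeCap by
// minCap; calling setTempCapacity(c, 0) afterwards reverses both adjustments.
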
// setTempBias changes the connection bias of a node in the temporary state.
func (pp *priorityPool) setTempBias(c *ppNodeInfo, bias time.Duration) {
	if !c.tempState { // should never happen
		log.Error("Node is not in temporary state")
		return
	}
	c.bias = bias
}

// setTempStepDiv changes the capacity divisor of a node in the temporary state.
func (pp *priorityPool) setTempStepDiv(c *ppNodeInfo, stepDiv uint64) {
	if !c.tempState { // should never happen
		log.Error("Node is not in temporary state")
		return
	}
	c.stepDiv = stepDiv
}

// enforceLimits enforces active node count and total capacity limits. It returns the
// lowest priority active node and its priority. Note that this function operates on the
// temporary internal state.
func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) {
	if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount {
		return nil, math.MinInt64
	}
	var (
		lastNode          *ppNodeInfo
		maxActivePriority int64
	)
	pp.activeQueue.MultiPop(func(c *ppNodeInfo, priority int64) bool {
		lastNode = c
		pp.setTempState(c)
		maxActivePriority = priority
		if c.tempCapacity == c.minTarget || pp.activeCount > pp.maxCount {
			pp.setTempCapacity(c, 0)
		} else {
			sub := c.tempCapacity / c.stepDiv
			if sub == 0 {
				sub = 1
			}
			if c.tempCapacity-sub < c.minTarget {
				sub = c.tempCapacity - c.minTarget
			}
			pp.setTempCapacity(c, c.tempCapacity-sub)
			pp.activeQueue.Push(c)
		}
		return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount
	})
	return lastNode, invertPriority(maxActivePriority)
}

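// A worked example of the reduction steps above: with stepDiv == 4 and
// minTarget == 100, a node holding 400 capacity units is reduced to 300, 225,
// 169, 127 and finally 100 on successive passes (a pass only happens while the
// limits are still exceeded); once it sits at minTarget it is dropped to zero
// capacity, i.e. deactivated.
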
// finalizeChanges either commits or reverts temporary changes. The necessary capacity
// field and corresponding flag updates are not performed here but returned in a list because
// they should be performed while the mutex is not held.
func (pp *priorityPool) finalizeChanges(commit bool) (updates []capUpdate) {
	for _, c := range pp.tempState {
		// always remove and push back in order to update biased priority
		pp.removeFromQueues(c)
		oldCapacity := c.capacity
		if commit {
			c.capacity = c.tempCapacity
		} else {
			pp.setTempCapacity(c, c.capacity) // revert activeCount/activeCap
		}
		pp.unsetTempState(c)

		if c.connected {
			if c.capacity != 0 {
				pp.activeQueue.Push(c)
			} else {
				pp.inactiveQueue.Push(c, pp.inactivePriority(c))
			}
			if c.capacity != oldCapacity {
				updates = append(updates, capUpdate{c.node, oldCapacity, c.capacity})
			}
		}
	}
	pp.tempState = nil
	if commit {
		pp.ccUpdateForced = true
	}
	return
}

// capUpdate describes a capacityField and activeFlag/inactiveFlag update
type capUpdate struct {
	node           *enode.Node
	oldCap, newCap uint64
}

// updateFlags performs capacityField and activeFlag/inactiveFlag updates while the
// pool mutex is not held
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) updateFlags(updates []capUpdate) {
	for _, f := range updates {
		if f.oldCap == 0 {
			pp.ns.SetStateSub(f.node, pp.setup.activeFlag, pp.setup.inactiveFlag, 0)
		}
		if f.newCap == 0 {
			pp.ns.SetStateSub(f.node, pp.setup.inactiveFlag, pp.setup.activeFlag, 0)
			pp.ns.SetFieldSub(f.node, pp.setup.capacityField, nil)
		} else {
			pp.ns.SetFieldSub(f.node, pp.setup.capacityField, f.newCap)
		}
	}
}

// tryActivate tries to activate inactive nodes if possible
func (pp *priorityPool) tryActivate(commit bool) []capUpdate {
	for pp.inactiveQueue.Size() > 0 {
		c := pp.inactiveQueue.PopItem()
		pp.setTempState(c)
		pp.setTempBias(c, pp.activeBias)
		pp.setTempCapacity(c, pp.minCap)
		pp.activeQueue.Push(c)
		pp.enforceLimits()
		if c.tempCapacity > 0 {
			commit = true
			pp.setTempBias(c, 0)
		} else {
			break
		}
	}
	pp.ccUpdateForced = true
	return pp.finalizeChanges(commit)
}

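// Note that tryActivate only ever grants the minimal capacity pp.minCap to a
// newly activated node; raising the capacity further is presumably left to a
// subsequent requestCapacity call.
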
// updatePriority gets the current priority value of the given node from the nodePriority
// interface and performs the necessary changes. It is triggered by updateFlag.
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) updatePriority(node *enode.Node) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo)
	if c == nil || !c.connected {
		pp.lock.Unlock()
		return
	}
	pp.removeFromQueues(c)
	if c.capacity != 0 {
		pp.activeQueue.Push(c)
	} else {
		pp.inactiveQueue.Push(c, pp.inactivePriority(c))
	}
	updates := pp.tryActivate(false)
	pp.lock.Unlock()
	pp.updateFlags(updates)
}

// capacityCurve is a snapshot of the priority pool contents in a format that can efficiently
// estimate how much capacity could be granted to a given node at a given priority level.
type capacityCurve struct {
	points       []curvePoint       // curve points sorted in descending order of priority
	index        map[enode.ID][]int // curve point indexes belonging to each node
	excludeList  []int              // curve point indexes of the excluded node
	excludeFirst bool               // true if activeCount == maxCount
}

type curvePoint struct {
	freeCap uint64 // available capacity at the current priority level
	nextPri int64  // next priority level where more capacity will be available
}

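// Rough interpretation of the points (see getCapacityCurve and maxCapacity
// below): points[0].freeCap is available immediately, and a requester whose
// priority at the desired capacity exceeds points[i].nextPri can displace
// enough low priority capacity to reach points[i+1].freeCap. nextPri is
// non-decreasing along the curve because getCapacityCurve enforces
// monotonicity.
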
// getCapacityCurve returns a new or recently cached capacityCurve based on the contents of the pool
func (pp *priorityPool) getCapacityCurve() *capacityCurve {
	pp.lock.Lock()
	defer pp.lock.Unlock()

	now := pp.clock.Now()
	dt := time.Duration(now - pp.ccUpdatedAt)
	if !pp.ccUpdateForced && pp.cachedCurve != nil && dt < time.Second*10 {
		return pp.cachedCurve
	}

	pp.ccUpdateForced = false
	pp.ccUpdatedAt = now
	curve := &capacityCurve{
		index: make(map[enode.ID][]int),
	}
	pp.cachedCurve = curve

	var excludeID enode.ID
	excludeFirst := pp.maxCount == pp.activeCount
	// reduce node capacities or remove nodes until nothing is left in the queue;
	// record the available capacity and the necessary priority after each step
	lastPri := int64(math.MinInt64)
	for pp.activeCap > 0 {
		cp := curvePoint{}
		if pp.activeCap > pp.maxCap {
			log.Error("Active capacity is greater than allowed maximum", "active", pp.activeCap, "maximum", pp.maxCap)
		} else {
			cp.freeCap = pp.maxCap - pp.activeCap
		}
		// temporarily increase activeCap to enforce reducing or removing a node capacity
		tempCap := cp.freeCap + 1
		pp.activeCap += tempCap
		var next *ppNodeInfo
		// enforceLimits removes the lowest priority node if it has minimal capacity,
		// otherwise reduces its capacity
		next, cp.nextPri = pp.enforceLimits()
		if cp.nextPri < lastPri {
			// enforce monotonicity which may be broken by continuously changing priorities
			cp.nextPri = lastPri
		} else {
			lastPri = cp.nextPri
		}
		pp.activeCap -= tempCap
		if next == nil {
			log.Error("getCapacityCurve: cannot remove next element from the priority queue")
			break
		}
		id := next.node.ID()
		if excludeFirst {
			// if the node count limit is already reached then mark the node with the
			// lowest priority for exclusion
			curve.excludeFirst = true
			excludeID = id
			excludeFirst = false
		}
		// multiple curve points and therefore multiple indexes may belong to a node
		// if it was removed in multiple steps (if its capacity was more than the minimum)
		curve.index[id] = append(curve.index[id], len(curve.points))
		curve.points = append(curve.points, cp)
	}
	// restore original state of the queue
	pp.finalizeChanges(false)
	curve.points = append(curve.points, curvePoint{
		freeCap: pp.maxCap,
		nextPri: math.MaxInt64,
	})
	if curve.excludeFirst {
		curve.excludeList = curve.index[excludeID]
	}
	return curve
}

// exclude returns a capacityCurve with the given node excluded from the original curve
func (cc *capacityCurve) exclude(id enode.ID) *capacityCurve {
	if excludeList, ok := cc.index[id]; ok {
		// return a new version of the curve (only one excluded node can be selected)
		// Note: if the first node was excluded by default (excludeFirst == true) then
		// we can forget about that and exclude the node with the given id instead.
		return &capacityCurve{
			points:      cc.points,
			index:       cc.index,
			excludeList: excludeList,
		}
	}
	return cc
}

func (cc *capacityCurve) getPoint(i int) curvePoint {
	cp := cc.points[i]
	if i == 0 && cc.excludeFirst {
		cp.freeCap = 0
		return cp
	}
	for ii := len(cc.excludeList) - 1; ii >= 0; ii-- {
		ei := cc.excludeList[ii]
		if ei < i {
			break
		}
		e1, e2 := cc.points[ei], cc.points[ei+1]
		cp.freeCap += e2.freeCap - e1.freeCap
	}
	return cp
}

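// In getPoint above, excluding a node means treating the capacity it still
// holds as already free: for every excluded curve index ei at or after the
// queried point, the capacity freed at that step (points[ei+1].freeCap -
// points[ei].freeCap) is added back to the returned freeCap.
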
// maxCapacity calculates the maximum capacity available for a node with a given
// (monotonically decreasing) priority vs. capacity function. Note that if the requesting
// node is already in the pool then it should be excluded from the curve in order to get
// the correct result.
func (cc *capacityCurve) maxCapacity(priority func(cap uint64) int64) uint64 {
	min, max := 0, len(cc.points)-1 // the curve always has at least one point
	for min < max {
		mid := (min + max) / 2
		cp := cc.getPoint(mid)
		if cp.freeCap == 0 || priority(cp.freeCap) > cp.nextPri {
			min = mid + 1
		} else {
			max = mid
		}
	}
	cp2 := cc.getPoint(min)
	if cp2.freeCap == 0 || min == 0 {
		return cp2.freeCap
	}
	cp1 := cc.getPoint(min - 1)
	if priority(cp2.freeCap) > cp1.nextPri {
		return cp2.freeCap
	}
	minc, maxc := cp1.freeCap, cp2.freeCap-1
	for minc < maxc {
		midc := (minc + maxc + 1) / 2
		if midc == 0 || priority(midc) > cp1.nextPri {
			minc = midc
		} else {
			maxc = midc - 1
		}
	}
	return maxc
}
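
// Example use of the capacity curve (a sketch; np stands for whatever
// nodePriority implementation is stored in the node's balanceField):
//
//	curve := pp.getCapacityCurve().exclude(node.ID())
//	maxCap := curve.maxCapacity(func(cap uint64) int64 {
//		return np.estimatePriority(cap, 0, 0, bias, false)
//	})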