github.com/aidoskuneen/adk-node@v0.0.0-20220315131952-2e32567cb7f4/les/vflux/server/prioritypool.go

     1  // Copyright 2021 The adkgo Authors
     2  // This file is part of the adkgo library (adapted for adkgo from go-ethereum v1.10.8).
     3  //
     4  // The adkgo library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The adkgo library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the adkgo library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package server
    18  
    19  import (
    20  	"math"
    21  	"sync"
    22  	"time"
    23  
    24  	"github.com/aidoskuneen/adk-node/common/mclock"
    25  	"github.com/aidoskuneen/adk-node/common/prque"
    26  	"github.com/aidoskuneen/adk-node/log"
    27  	"github.com/aidoskuneen/adk-node/p2p/enode"
    28  	"github.com/aidoskuneen/adk-node/p2p/nodestate"
    29  )
    30  
    31  const (
    32  	lazyQueueRefresh = time.Second * 10 // refresh period of the active queue
    33  )
    34  
    35  // priorityPool handles a set of nodes where each node has a capacity (a scalar value)
    36  // and a priority (which can change over time and can also depend on the capacity).
    37  // A node is active if it has at least the necessary minimal amount of capacity while
    38  // inactive nodes have 0 capacity (values between 0 and the minimum are not allowed).
    39  // The pool ensures that the number and total capacity of all active nodes are limited
    40  // and the highest priority nodes are active at all times (limits can be changed
    41  // during operation with immediate effect).
    42  //
    43  // When activating clients, a priority bias is applied in favor of the already active
    44  // nodes in order to avoid nodes quickly alternating between active and inactive states
    45  // when their priorities are close to each other. The bias is specified in terms of
    46  // duration (time) because priorities are expected to usually get lower over time and
    47  // therefore a future minimum prediction (see estimatePriority) should monotonically
    48  // decrease with the specified time parameter.
    49  // This time bias can be interpreted as minimum expected active time at the given
    50  // capacity (if the threshold priority stays the same).
    51  //
    52  // Nodes in the pool always have either inactiveFlag or activeFlag set. A new node is
    53  // added to the pool by externally setting inactiveFlag. priorityPool can switch a node
    54  // between inactiveFlag and activeFlag at any time. Nodes can be removed from the pool
    55  // by externally resetting both flags. activeFlag should not be set externally.
    56  //
    57  // The highest priority nodes in "inactive" state are moved to "active" state as soon as
    58  // the minimum capacity can be granted for them. The capacity of lower priority active
    59  // nodes is reduced or they are demoted to "inactive" state if their priority is
    60  // insufficient even at minimal capacity.
    61  type priorityPool struct {
    62  	setup                        *serverSetup
    63  	ns                           *nodestate.NodeStateMachine
    64  	clock                        mclock.Clock
    65  	lock                         sync.Mutex
    66  	maxCount, maxCap             uint64
    67  	minCap                       uint64
    68  	activeBias                   time.Duration
    69  	capacityStepDiv, fineStepDiv uint64
    70  
    71  	// The cached snapshot of the priority pool, used to answer capacity queries.
    72  	cachedCurve    *capacityCurve
    73  	ccUpdatedAt    mclock.AbsTime
    74  	ccUpdateForced bool
    75  
    76  	// Runtime status of the priority pool; represents the
    77  	// temporary state if tempState is not empty
    78  	tempState              []*ppNodeInfo
    79  	activeCount, activeCap uint64
    80  	activeQueue            *prque.LazyQueue
    81  	inactiveQueue          *prque.Prque
    82  }
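// Illustrative sketch of the externally driven lifecycle described above,
// assuming an initialized pool pp whose node is already tracked by the
// NodeStateMachine (identifiers are hypothetical; in practice the balanceField
// subscription installed by newPriorityPool performs these transitions):
//
//	// add a node to the pool by setting inactiveFlag externally
//	pp.ns.SetState(node, pp.setup.inactiveFlag, nodestate.Flags{}, 0)
//
//	// the pool may switch the node to activeFlag at any time; remove it
//	// from the pool by resetting both flags
//	pp.ns.SetState(node, nodestate.Flags{}, pp.setup.activeFlag.Or(pp.setup.inactiveFlag), 0)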
    83  
    84  // ppNodeInfo is the internal node descriptor of priorityPool
    85  type ppNodeInfo struct {
    86  	nodePriority               nodePriority
    87  	node                       *enode.Node
    88  	connected                  bool
    89  	capacity                   uint64 // only changed when temporary state is committed
    90  	activeIndex, inactiveIndex int
    91  
    92  	tempState    bool   // should only be true while the priorityPool lock is held
    93  	tempCapacity uint64 // equals capacity when tempState is false
    94  
    95  	// the following fields only affect the temporary state and they are set to their
    96  	// default values when leaving the temp state
    97  	minTarget, stepDiv uint64
    98  	bias               time.Duration
    99  }
   100  
   101  // newPriorityPool creates a new priorityPool
   102  func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv, fineStepDiv uint64) *priorityPool {
   103  	pp := &priorityPool{
   104  		setup:           setup,
   105  		ns:              ns,
   106  		clock:           clock,
   107  		inactiveQueue:   prque.New(inactiveSetIndex),
   108  		minCap:          minCap,
   109  		activeBias:      activeBias,
   110  		capacityStepDiv: capacityStepDiv,
   111  		fineStepDiv:     fineStepDiv,
   112  	}
   113  	if pp.activeBias < time.Duration(1) {
   114  		pp.activeBias = time.Duration(1)
   115  	}
   116  	pp.activeQueue = prque.NewLazyQueue(activeSetIndex, activePriority, pp.activeMaxPriority, clock, lazyQueueRefresh)
   117  
   118  	ns.SubscribeField(pp.setup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
   119  		if newValue != nil {
   120  			c := &ppNodeInfo{
   121  				node:          node,
   122  				nodePriority:  newValue.(nodePriority),
   123  				activeIndex:   -1,
   124  				inactiveIndex: -1,
   125  			}
   126  			ns.SetFieldSub(node, pp.setup.queueField, c)
   127  			ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0)
   128  		} else {
   129  			ns.SetStateSub(node, nodestate.Flags{}, pp.setup.activeFlag.Or(pp.setup.inactiveFlag), 0)
   130  			if n, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); n != nil {
   131  				pp.disconnectedNode(n)
   132  			}
   133  			ns.SetFieldSub(node, pp.setup.capacityField, nil)
   134  			ns.SetFieldSub(node, pp.setup.queueField, nil)
   135  		}
   136  	})
   137  	ns.SubscribeState(pp.setup.activeFlag.Or(pp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
   138  		if c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); c != nil {
   139  			if oldState.IsEmpty() {
   140  				pp.connectedNode(c)
   141  			}
   142  			if newState.IsEmpty() {
   143  				pp.disconnectedNode(c)
   144  			}
   145  		}
   146  	})
   147  	ns.SubscribeState(pp.setup.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
   148  		if !newState.IsEmpty() {
   149  			pp.updatePriority(node)
   150  		}
   151  	})
   152  	return pp
   153  }
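// Construction sketch with illustrative parameter values, assuming the
// package's newServerSetup helper and a fresh NodeStateMachine (as wired up by
// the vflux clientPool in upstream go-ethereum):
//
//	setup := newServerSetup()
//	ns := nodestate.NewNodeStateMachine(nil, nil, &mclock.System{}, setup.setup)
//	pp := newPriorityPool(ns, setup, &mclock.System{}, 1, time.Second*10, 4, 100)
//	pp.SetLimits(100, 1000000) // at most 100 active nodes and 1M total capacity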
   154  
   155  // requestCapacity tries to set the capacity of a connected node to the highest possible
   156  // value inside the given target range. If maxTarget is not reachable then the capacity is
   157  // iteratively reduced in fine steps based on the fineStepDiv parameter until minTarget is reached.
   158  // The function returns the new capacity if successful and the original capacity otherwise.
   159  // Note: this function should run inside a NodeStateMachine operation
   160  func (pp *priorityPool) requestCapacity(node *enode.Node, minTarget, maxTarget uint64, bias time.Duration) uint64 {
   161  	pp.lock.Lock()
   162  	pp.activeQueue.Refresh()
   163  
   164  	if minTarget < pp.minCap {
   165  		minTarget = pp.minCap
   166  	}
   167  	if maxTarget < minTarget {
   168  		maxTarget = minTarget
   169  	}
   170  	if bias < pp.activeBias {
   171  		bias = pp.activeBias
   172  	}
   173  	c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo)
   174  	if c == nil {
   175  		log.Error("requestCapacity called for unknown node", "id", node.ID())
   176  		pp.lock.Unlock()
   177  		return 0
   178  	}
   179  	pp.setTempState(c)
   180  	if maxTarget > c.capacity {
   181  		pp.setTempStepDiv(c, pp.fineStepDiv)
   182  		pp.setTempBias(c, bias)
   183  	}
   184  	pp.setTempCapacity(c, maxTarget)
   185  	c.minTarget = minTarget
   186  	pp.activeQueue.Remove(c.activeIndex)
   187  	pp.inactiveQueue.Remove(c.inactiveIndex)
   188  	pp.activeQueue.Push(c)
   189  	pp.enforceLimits()
   190  	updates := pp.finalizeChanges(c.tempCapacity >= minTarget && c.tempCapacity <= maxTarget && c.tempCapacity != c.capacity)
   191  	pp.lock.Unlock()
   192  	pp.updateFlags(updates)
   193  	return c.capacity
   194  }
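// Usage sketch: requestCapacity must be wrapped in a NodeStateMachine
// operation; node is assumed to be connected and already tracked by the pool:
//
//	var granted uint64
//	pp.ns.Operation(func() {
//		granted = pp.requestCapacity(node, pp.minCap, 4*pp.minCap, 0)
//	})
//	// granted is the node's (possibly unchanged) capacity after the request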
   195  
   196  // SetLimits sets the maximum number and total capacity of simultaneously active nodes
   197  func (pp *priorityPool) SetLimits(maxCount, maxCap uint64) {
   198  	pp.lock.Lock()
   199  	pp.activeQueue.Refresh()
   200  	inc := (maxCount > pp.maxCount) || (maxCap > pp.maxCap)
   201  	dec := (maxCount < pp.maxCount) || (maxCap < pp.maxCap)
   202  	pp.maxCount, pp.maxCap = maxCount, maxCap
   203  
   204  	var updates []capUpdate
   205  	if dec {
   206  		pp.enforceLimits()
   207  		updates = pp.finalizeChanges(true)
   208  	}
   209  	if inc {
   210  		updates = append(updates, pp.tryActivate(false)...)
   211  	}
   212  	pp.lock.Unlock()
   213  	pp.ns.Operation(func() { pp.updateFlags(updates) })
   214  }
   215  
   216  // setActiveBias sets the bias applied when trying to activate inactive nodes
   217  func (pp *priorityPool) setActiveBias(bias time.Duration) {
   218  	pp.lock.Lock()
   219  	pp.activeBias = bias
   220  	if pp.activeBias < time.Duration(1) {
   221  		pp.activeBias = time.Duration(1)
   222  	}
   223  	updates := pp.tryActivate(false)
   224  	pp.lock.Unlock()
   225  	pp.ns.Operation(func() { pp.updateFlags(updates) })
   226  }
   227  
   228  // Active returns the number and total capacity of currently active nodes
   229  func (pp *priorityPool) Active() (uint64, uint64) {
   230  	pp.lock.Lock()
   231  	defer pp.lock.Unlock()
   232  
   233  	return pp.activeCount, pp.activeCap
   234  }
   235  
   236  // Limits returns the maximum allowed number and total capacity of active nodes
   237  func (pp *priorityPool) Limits() (uint64, uint64) {
   238  	pp.lock.Lock()
   239  	defer pp.lock.Unlock()
   240  
   241  	return pp.maxCount, pp.maxCap
   242  }
   243  
   244  // inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue
   245  func inactiveSetIndex(a interface{}, index int) {
   246  	a.(*ppNodeInfo).inactiveIndex = index
   247  }
   248  
   249  // activeSetIndex callback updates ppNodeInfo item index in activeQueue
   250  func activeSetIndex(a interface{}, index int) {
   251  	a.(*ppNodeInfo).activeIndex = index
   252  }
   253  
   254  // invertPriority inverts a priority value. The active queue uses inverted priorities
   255  // because the node on the top is the first to be deactivated.
   256  func invertPriority(p int64) int64 {
   257  	if p == math.MinInt64 {
   258  		return math.MaxInt64
   259  	}
   260  	return -p
   261  }
   262  
   263  // activePriority callback returns actual priority of ppNodeInfo item in activeQueue
   264  func activePriority(a interface{}) int64 {
   265  	c := a.(*ppNodeInfo)
   266  	if c.bias == 0 {
   267  		return invertPriority(c.nodePriority.priority(c.tempCapacity))
   268  	} else {
   269  		return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, 0, c.bias, true))
   270  	}
   271  }
   272  
   273  // activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue
   274  func (pp *priorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) int64 {
   275  	c := a.(*ppNodeInfo)
   276  	future := time.Duration(until - pp.clock.Now())
   277  	if future < 0 {
   278  		future = 0
   279  	}
   280  	return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, future, c.bias, false))
   281  }
   282  
   283  // inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue
   284  func (pp *priorityPool) inactivePriority(p *ppNodeInfo) int64 {
   285  	return p.nodePriority.priority(pp.minCap)
   286  }
   287  
   288  // connectedNode is called when a new node has been added to the pool (inactiveFlag set)
   289  // Note: this function should run inside a NodeStateMachine operation
   290  func (pp *priorityPool) connectedNode(c *ppNodeInfo) {
   291  	pp.lock.Lock()
   292  	pp.activeQueue.Refresh()
   293  	if c.connected {
   294  		pp.lock.Unlock()
   295  		return
   296  	}
   297  	c.connected = true
   298  	pp.inactiveQueue.Push(c, pp.inactivePriority(c))
   299  	updates := pp.tryActivate(false)
   300  	pp.lock.Unlock()
   301  	pp.updateFlags(updates)
   302  }
   303  
   304  // disconnectedNode is called when a node has been removed from the pool (both inactiveFlag
   305  // and activeFlag reset)
   306  // Note: this function should run inside a NodeStateMachine operation
   307  func (pp *priorityPool) disconnectedNode(c *ppNodeInfo) {
   308  	pp.lock.Lock()
   309  	pp.activeQueue.Refresh()
   310  	if !c.connected {
   311  		pp.lock.Unlock()
   312  		return
   313  	}
   314  	c.connected = false
   315  	pp.activeQueue.Remove(c.activeIndex)
   316  	pp.inactiveQueue.Remove(c.inactiveIndex)
   317  
   318  	var updates []capUpdate
   319  	if c.capacity != 0 {
   320  		pp.setTempState(c)
   321  		pp.setTempCapacity(c, 0)
   322  		updates = pp.tryActivate(true)
   323  	}
   324  	pp.lock.Unlock()
   325  	pp.updateFlags(updates)
   326  }
   327  
   328  // setTempState internally puts a node in a temporary state that can either be reverted
   329  // or confirmed later. This temporary state allows changing the capacity of a node and
   330  // moving it between the active and inactive queue. activeFlag/inactiveFlag and
   331  // capacityField are not changed while the changes are still temporary.
   332  func (pp *priorityPool) setTempState(c *ppNodeInfo) {
   333  	if c.tempState {
   334  		return
   335  	}
   336  	c.tempState = true
   337  	if c.tempCapacity != c.capacity { // should never happen
   338  		log.Error("tempCapacity != capacity when entering tempState")
   339  	}
   340  	// Assign all the defaults to the temp state.
   341  	c.minTarget = pp.minCap
   342  	c.stepDiv = pp.capacityStepDiv
   343  	c.bias = 0
   344  	pp.tempState = append(pp.tempState, c)
   345  }
   346  
   347  // unsetTempState revokes the temp status of the node and resets all internal
   348  // fields to their default values.
   349  func (pp *priorityPool) unsetTempState(c *ppNodeInfo) {
   350  	if !c.tempState {
   351  		return
   352  	}
   353  	c.tempState = false
   354  	if c.tempCapacity != c.capacity { // should never happen
   355  		log.Error("tempCapacity != capacity when leaving tempState")
   356  	}
   357  	c.minTarget = pp.minCap
   358  	c.stepDiv = pp.capacityStepDiv
   359  	c.bias = 0
   360  }
   361  
   362  // setTempCapacity changes the capacity of a node in the temporary state and adjusts
   363  // activeCap and activeCount accordingly. Since this change is performed in the temporary
   364  // state it should be called after setTempState and before finalizeChanges.
   365  func (pp *priorityPool) setTempCapacity(c *ppNodeInfo, cap uint64) {
   366  	if !c.tempState { // should never happen
   367  		log.Error("Node is not in temporary state")
   368  		return
   369  	}
   370  	pp.activeCap += cap - c.tempCapacity
   371  	if c.tempCapacity == 0 {
   372  		pp.activeCount++
   373  	}
   374  	if cap == 0 {
   375  		pp.activeCount--
   376  	}
   377  	c.tempCapacity = cap
   378  }
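// For example, raising a node's tempCapacity from 0 to 100 adds 100 to activeCap
// and increments activeCount; lowering it from 100 back to 0 later reverses both,
// so the counters always reflect the current temporary capacities.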
   379  
   380  // setTempBias changes the connection bias of a node in the temporary state.
   381  func (pp *priorityPool) setTempBias(c *ppNodeInfo, bias time.Duration) {
   382  	if !c.tempState { // should never happen
   383  		log.Error("Node is not in temporary state")
   384  		return
   385  	}
   386  	c.bias = bias
   387  }
   388  
   389  // setTempStepDiv changes the capacity divisor of a node in the temporary state.
   390  func (pp *priorityPool) setTempStepDiv(c *ppNodeInfo, stepDiv uint64) {
   391  	if !c.tempState { // should never happen
   392  		log.Error("Node is not in temporary state")
   393  		return
   394  	}
   395  	c.stepDiv = stepDiv
   396  }
   397  
   398  // enforceLimits enforces active node count and total capacity limits. It returns the
   399  // lowest active node priority. Note that this function is performed on the temporary
   400  // internal state.
   401  func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) {
   402  	if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount {
   403  		return nil, math.MinInt64
   404  	}
   405  	var (
   406  		c                 *ppNodeInfo
   407  		maxActivePriority int64
   408  	)
   409  	pp.activeQueue.MultiPop(func(data interface{}, priority int64) bool {
   410  		c = data.(*ppNodeInfo)
   411  		pp.setTempState(c)
   412  		maxActivePriority = priority
   413  		if c.tempCapacity == c.minTarget || pp.activeCount > pp.maxCount {
   414  			pp.setTempCapacity(c, 0)
   415  		} else {
   416  			sub := c.tempCapacity / c.stepDiv
   417  			if sub == 0 {
   418  				sub = 1
   419  			}
   420  			if c.tempCapacity-sub < c.minTarget {
   421  				sub = c.tempCapacity - c.minTarget
   422  			}
   423  			pp.setTempCapacity(c, c.tempCapacity-sub)
   424  			pp.activeQueue.Push(c)
   425  		}
   426  		return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount
   427  	})
   428  	return c, invertPriority(maxActivePriority)
   429  }
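// Worked example of the reduction step above: a node visited with
// tempCapacity = 1000, stepDiv = 4 and minTarget = 100 is first reduced to 750;
// if it remains the lowest priority item on later visits it shrinks to 563,
// 423, ... until the limits are satisfied. Once it reaches minTarget, or if the
// node count limit is exceeded, its capacity is dropped to 0 instead.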
   430  
   431  // finalizeChanges either commits or reverts temporary changes. The necessary capacity
   432  // field and corresponding flag updates are not performed here but returned in a list because
   433  // they should be performed while the mutex is not held.
   434  func (pp *priorityPool) finalizeChanges(commit bool) (updates []capUpdate) {
   435  	for _, c := range pp.tempState {
   436  		// always remove and push back in order to update biased priority
   437  		pp.activeQueue.Remove(c.activeIndex)
   438  		pp.inactiveQueue.Remove(c.inactiveIndex)
   439  		oldCapacity := c.capacity
   440  		if commit {
   441  			c.capacity = c.tempCapacity
   442  		} else {
   443  			pp.setTempCapacity(c, c.capacity) // revert activeCount/activeCap
   444  		}
   445  		pp.unsetTempState(c)
   446  
   447  		if c.connected {
   448  			if c.capacity != 0 {
   449  				pp.activeQueue.Push(c)
   450  			} else {
   451  				pp.inactiveQueue.Push(c, pp.inactivePriority(c))
   452  			}
   453  			if c.capacity != oldCapacity {
   454  				updates = append(updates, capUpdate{c.node, oldCapacity, c.capacity})
   455  			}
   456  		}
   457  	}
   458  	pp.tempState = nil
   459  	if commit {
   460  		pp.ccUpdateForced = true
   461  	}
   462  	return
   463  }
   464  
   465  // capUpdate describes a capacityField and activeFlag/inactiveFlag update
   466  type capUpdate struct {
   467  	node           *enode.Node
   468  	oldCap, newCap uint64
   469  }
   470  
   471  // updateFlags performs capacityField and activeFlag/inactiveFlag updates while the
   472  // pool mutex is not held
   473  // Note: this function should run inside a NodeStateMachine operation
   474  func (pp *priorityPool) updateFlags(updates []capUpdate) {
   475  	for _, f := range updates {
   476  		if f.oldCap == 0 {
   477  			pp.ns.SetStateSub(f.node, pp.setup.activeFlag, pp.setup.inactiveFlag, 0)
   478  		}
   479  		if f.newCap == 0 {
   480  			pp.ns.SetStateSub(f.node, pp.setup.inactiveFlag, pp.setup.activeFlag, 0)
   481  			pp.ns.SetFieldSub(f.node, pp.setup.capacityField, nil)
   482  		} else {
   483  			pp.ns.SetFieldSub(f.node, pp.setup.capacityField, f.newCap)
   484  		}
   485  	}
   486  }
   487  
   488  // tryActivate tries to activate inactive nodes if possible
   489  func (pp *priorityPool) tryActivate(commit bool) []capUpdate {
   490  	for pp.inactiveQueue.Size() > 0 {
   491  		c := pp.inactiveQueue.PopItem().(*ppNodeInfo)
   492  		pp.setTempState(c)
   493  		pp.setTempBias(c, pp.activeBias)
   494  		pp.setTempCapacity(c, pp.minCap)
   495  		pp.activeQueue.Push(c)
   496  		pp.enforceLimits()
   497  		if c.tempCapacity > 0 {
   498  			commit = true
   499  			pp.setTempBias(c, 0)
   500  		} else {
   501  			break
   502  		}
   503  	}
   504  	pp.ccUpdateForced = true
   505  	return pp.finalizeChanges(commit)
   506  }
   507  
   508  // updatePriority gets the current priority value of the given node from the nodePriority
   509  // interface and performs the necessary changes. It is triggered by updateFlag.
   510  // Note: this function should run inside a NodeStateMachine operation
   511  func (pp *priorityPool) updatePriority(node *enode.Node) {
   512  	pp.lock.Lock()
   513  	pp.activeQueue.Refresh()
   514  	c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo)
   515  	if c == nil || !c.connected {
   516  		pp.lock.Unlock()
   517  		return
   518  	}
   519  	pp.activeQueue.Remove(c.activeIndex)
   520  	pp.inactiveQueue.Remove(c.inactiveIndex)
   521  	if c.capacity != 0 {
   522  		pp.activeQueue.Push(c)
   523  	} else {
   524  		pp.inactiveQueue.Push(c, pp.inactivePriority(c))
   525  	}
   526  	updates := pp.tryActivate(false)
   527  	pp.lock.Unlock()
   528  	pp.updateFlags(updates)
   529  }
   530  
   531  // capacityCurve is a snapshot of the priority pool contents in a format that can efficiently
   532  // estimate how much capacity could be granted to a given node at a given priority level.
   533  type capacityCurve struct {
   534  	points       []curvePoint       // curve points sorted in descending order of priority
   535  	index        map[enode.ID][]int // curve point indexes belonging to each node
   536  	excludeList  []int              // curve point indexes of excluded node
   537  	excludeFirst bool               // true if activeCount == maxCount
   538  }
   539  
   540  type curvePoint struct {
   541  	freeCap uint64 // available capacity at the current priority level
   542  	nextPri int64  // next priority level where more capacity will be available
   543  }
   544  
   545  // getCapacityCurve returns a new or recently cached capacityCurve based on the contents of the pool
   546  func (pp *priorityPool) getCapacityCurve() *capacityCurve {
   547  	pp.lock.Lock()
   548  	defer pp.lock.Unlock()
   549  
   550  	now := pp.clock.Now()
   551  	dt := time.Duration(now - pp.ccUpdatedAt)
   552  	if !pp.ccUpdateForced && pp.cachedCurve != nil && dt < time.Second*10 {
   553  		return pp.cachedCurve
   554  	}
   555  
   556  	pp.ccUpdateForced = false
   557  	pp.ccUpdatedAt = now
   558  	curve := &capacityCurve{
   559  		index: make(map[enode.ID][]int),
   560  	}
   561  	pp.cachedCurve = curve
   562  
   563  	var excludeID enode.ID
   564  	excludeFirst := pp.maxCount == pp.activeCount
   565  	// reduce node capacities or remove nodes until nothing is left in the queue;
   566  	// record the available capacity and the necessary priority after each step
   567  	lastPri := int64(math.MinInt64)
   568  	for pp.activeCap > 0 {
   569  		cp := curvePoint{}
   570  		if pp.activeCap > pp.maxCap {
   571  			log.Error("Active capacity is greater than allowed maximum", "active", pp.activeCap, "maximum", pp.maxCap)
   572  		} else {
   573  			cp.freeCap = pp.maxCap - pp.activeCap
   574  		}
   575  		// temporarily increase activeCap to enforce reducing or removing a node capacity
   576  		tempCap := cp.freeCap + 1
   577  		pp.activeCap += tempCap
   578  		var next *ppNodeInfo
   579  		// enforceLimits removes the lowest priority node if it has minimal capacity,
   580  		// otherwise reduces its capacity
   581  		next, cp.nextPri = pp.enforceLimits()
   582  		if cp.nextPri < lastPri {
   583  			// enforce monotonicity which may be broken by continuously changing priorities
   584  			cp.nextPri = lastPri
   585  		} else {
   586  			lastPri = cp.nextPri
   587  		}
   588  		pp.activeCap -= tempCap
   589  		if next == nil {
   590  			log.Error("getCapacityCurve: cannot remove next element from the priority queue")
   591  			break
   592  		}
   593  		id := next.node.ID()
   594  		if excludeFirst {
   595  			// if the node count limit is already reached then mark the node with the
   596  			// lowest priority for exclusion
   597  			curve.excludeFirst = true
   598  			excludeID = id
   599  			excludeFirst = false
   600  		}
   601  		// multiple curve points and therefore multiple indexes may belong to a node
   602  		// if it was removed in multiple steps (if its capacity was more than the minimum)
   603  		curve.index[id] = append(curve.index[id], len(curve.points))
   604  		curve.points = append(curve.points, cp)
   605  	}
   606  	// restore original state of the queue
   607  	pp.finalizeChanges(false)
   608  	curve.points = append(curve.points, curvePoint{
   609  		freeCap: pp.maxCap,
   610  		nextPri: math.MaxInt64,
   611  	})
   612  	if curve.excludeFirst {
   613  		curve.excludeList = curve.index[excludeID]
   614  	}
   615  	return curve
   616  }
   617  
   618  // exclude returns a capacityCurve with the given node excluded from the original curve
   619  func (cc *capacityCurve) exclude(id enode.ID) *capacityCurve {
   620  	if excludeList, ok := cc.index[id]; ok {
   621  		// return a new version of the curve (only one excluded node can be selected)
   622  		// Note: if the first node was excluded by default (excludeFirst == true) then
   623  		// we can forget about that and exclude the node with the given id instead.
   624  		return &capacityCurve{
   625  			points:      cc.points,
   626  			index:       cc.index,
   627  			excludeList: excludeList,
   628  		}
   629  	}
   630  	return cc
   631  }
   632  
   633  func (cc *capacityCurve) getPoint(i int) curvePoint {
   634  	cp := cc.points[i]
   635  	if i == 0 && cc.excludeFirst {
   636  		cp.freeCap = 0
   637  		return cp
   638  	}
   639  	for ii := len(cc.excludeList) - 1; ii >= 0; ii-- {
   640  		ei := cc.excludeList[ii]
   641  		if ei < i {
   642  			break
   643  		}
   644  		e1, e2 := cc.points[ei], cc.points[ei+1]
   645  		cp.freeCap += e2.freeCap - e1.freeCap
   646  	}
   647  	return cp
   648  }
   649  
   650  // maxCapacity calculates the maximum capacity available for a node with a given
   651  // (monotonically decreasing) priority vs. capacity function. Note that if the requesting
   652  // node is already in the pool then it should be excluded from the curve in order to get
   653  // the correct result.
   654  func (cc *capacityCurve) maxCapacity(priority func(cap uint64) int64) uint64 {
   655  	min, max := 0, len(cc.points)-1 // the curve always has at least one point
   656  	for min < max {
   657  		mid := (min + max) / 2
   658  		cp := cc.getPoint(mid)
   659  		if cp.freeCap == 0 || priority(cp.freeCap) > cp.nextPri {
   660  			min = mid + 1
   661  		} else {
   662  			max = mid
   663  		}
   664  	}
   665  	cp2 := cc.getPoint(min)
   666  	if cp2.freeCap == 0 || min == 0 {
   667  		return cp2.freeCap
   668  	}
   669  	cp1 := cc.getPoint(min - 1)
   670  	if priority(cp2.freeCap) > cp1.nextPri {
   671  		return cp2.freeCap
   672  	}
   673  	minc, maxc := cp1.freeCap, cp2.freeCap-1
   674  	for minc < maxc {
   675  		midc := (minc + maxc + 1) / 2
   676  		if midc == 0 || priority(midc) > cp1.nextPri {
   677  			minc = midc
   678  		} else {
   679  			maxc = midc - 1
   680  		}
   681  	}
   682  	return maxc
   683  }
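// Usage sketch for the capacity curve: a caller holding a nodePriority
// implementation prio (hypothetical here, as is the bias value) can estimate
// the largest capacity it could currently be granted; a node that is already
// in the pool should exclude itself first:
//
//	curve := pp.getCapacityCurve().exclude(node.ID())
//	maxCap := curve.maxCapacity(func(cap uint64) int64 {
//		return prio.estimatePriority(cap, 0, 0, bias, false)
//	})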