github.com/cryptogateway/go-paymex@v0.0.0-20210204174735-96277fb1e602/les/lespay/server/prioritypool.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"math"
	"reflect"
	"sync"
	"time"

	"github.com/cryptogateway/go-paymex/common/mclock"
	"github.com/cryptogateway/go-paymex/common/prque"
	"github.com/cryptogateway/go-paymex/log"
	"github.com/cryptogateway/go-paymex/p2p/enode"
	"github.com/cryptogateway/go-paymex/p2p/nodestate"
)

const (
	lazyQueueRefresh = time.Second * 10 // refresh period of the active queue
)

// PriorityPoolSetup contains the node state flags and fields used by PriorityPool.
// Note: ActiveFlag and InactiveFlag can be controlled both externally and by the
// pool; see the PriorityPool description for details.
type PriorityPoolSetup struct {
	// controlled by PriorityPool
	ActiveFlag, InactiveFlag       nodestate.Flags
	CapacityField, ppNodeInfoField nodestate.Field
	// external connections
	updateFlag    nodestate.Flags
	priorityField nodestate.Field
}

// NewPriorityPoolSetup creates a new PriorityPoolSetup and initializes the fields
// and flags controlled by PriorityPool
func NewPriorityPoolSetup(setup *nodestate.Setup) PriorityPoolSetup {
	return PriorityPoolSetup{
		ActiveFlag:      setup.NewFlag("active"),
		InactiveFlag:    setup.NewFlag("inactive"),
		CapacityField:   setup.NewField("capacity", reflect.TypeOf(uint64(0))),
		ppNodeInfoField: setup.NewField("ppNodeInfo", reflect.TypeOf(&ppNodeInfo{})),
	}
}

// Connect sets the fields and flags used by PriorityPool as an input
func (pps *PriorityPoolSetup) Connect(priorityField nodestate.Field, updateFlag nodestate.Flags) {
	pps.priorityField = priorityField // should implement nodePriority
	pps.updateFlag = updateFlag       // triggers an immediate priority update
}

// PriorityPool handles a set of nodes where each node has a capacity (a scalar value)
// and a priority (which can change over time and can also depend on the capacity).
// A node is active if it has at least the necessary minimal amount of capacity, while
// inactive nodes have 0 capacity (values between 0 and the minimum are not allowed).
// The pool ensures that the number and total capacity of all active nodes are limited
// and that the highest priority nodes are active at all times (limits can be changed
// during operation with immediate effect).
//
// When activating clients, a priority bias is applied in favor of the already active
// nodes in order to avoid nodes quickly alternating between active and inactive states
// when their priorities are close to each other. The bias is specified in terms of
// duration (time) because priorities are expected to usually get lower over time and
// therefore a future minimum prediction (see EstMinPriority) should monotonically
// decrease with the specified time parameter.
// This time bias can be interpreted as the minimum expected active time at the given
// capacity (if the threshold priority stays the same).
//
// Nodes in the pool always have either InactiveFlag or ActiveFlag set. A new node is
// added to the pool by externally setting InactiveFlag. PriorityPool can switch a node
// between InactiveFlag and ActiveFlag at any time. Nodes can be removed from the pool
// by externally resetting both flags. ActiveFlag should not be set externally.
//
// The highest priority nodes in "inactive" state are moved to "active" state as soon as
// the minimum capacity can be granted for them. The capacity of lower priority active
// nodes is reduced or they are demoted to "inactive" state if their priority is
// insufficient even at minimal capacity.
type PriorityPool struct {
	PriorityPoolSetup
	ns                     *nodestate.NodeStateMachine
	clock                  mclock.Clock
	lock                   sync.Mutex
	activeQueue            *prque.LazyQueue
	inactiveQueue          *prque.Prque
	changed                []*ppNodeInfo
	activeCount, activeCap uint64
	maxCount, maxCap       uint64
	minCap                 uint64
	activeBias             time.Duration
	capacityStepDiv        uint64
}
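
// A typical external lifecycle, sketched from the description above (the priority
// field comes from the caller's nodestate.Setup via Connect, and "balance" is a
// hypothetical value implementing nodePriority):
//
//	ns.SetField(node, priorityField, balance)                 // provide the priority source
//	ns.SetState(node, pps.InactiveFlag, nodestate.Flags{}, 0) // add the node to the pool
//	// the pool may now switch the node between inactive and active on its own
//	ns.SetState(node, nodestate.Flags{}, pps.ActiveFlag.Or(pps.InactiveFlag), 0) // remove it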

// nodePriority interface provides current and estimated future priorities on demand
type nodePriority interface {
	// Priority should return the current priority of the node (higher is better)
	Priority(now mclock.AbsTime, cap uint64) int64
	// EstMinPriority should return a lower estimate for the minimum of the node priority
	// value starting from the current moment until the given time. If the priority goes
	// under the returned estimate before the specified moment then it is the caller's
	// responsibility to signal with updateFlag.
	EstMinPriority(until mclock.AbsTime, cap uint64, update bool) int64
}
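
// flatPriority is a minimal illustrative nodePriority implementation (an assumption
// added for clarity, not part of the original API): its priority is constant, so the
// future minimum estimate can safely equal the current value.
type flatPriority struct{ value int64 }

// Priority returns the constant priority regardless of time or capacity.
func (f flatPriority) Priority(now mclock.AbsTime, cap uint64) int64 { return f.value }

// EstMinPriority also returns the constant; a constant priority never drops below
// itself, so the caller never needs to signal with updateFlag.
func (f flatPriority) EstMinPriority(until mclock.AbsTime, cap uint64, update bool) int64 {
	return f.value
}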

// ppNodeInfo is the internal node descriptor of PriorityPool
type ppNodeInfo struct {
	nodePriority               nodePriority
	node                       *enode.Node
	connected                  bool
	capacity, origCap          uint64
	bias                       time.Duration
	forced, changed            bool
	activeIndex, inactiveIndex int
}

// NewPriorityPool creates a new PriorityPool
func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *PriorityPool {
	pp := &PriorityPool{
		ns:                ns,
		PriorityPoolSetup: setup,
		clock:             clock,
		activeQueue:       prque.NewLazyQueue(activeSetIndex, activePriority, activeMaxPriority, clock, lazyQueueRefresh),
		inactiveQueue:     prque.New(inactiveSetIndex),
		minCap:            minCap,
		activeBias:        activeBias,
		capacityStepDiv:   capacityStepDiv,
	}

	ns.SubscribeField(pp.priorityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
		if newValue != nil {
			c := &ppNodeInfo{
				node:          node,
				nodePriority:  newValue.(nodePriority),
				activeIndex:   -1,
				inactiveIndex: -1,
			}
			ns.SetFieldSub(node, pp.ppNodeInfoField, c)
		} else {
			ns.SetStateSub(node, nodestate.Flags{}, pp.ActiveFlag.Or(pp.InactiveFlag), 0)
			if n, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); n != nil {
				pp.disconnectedNode(n)
			}
			ns.SetFieldSub(node, pp.CapacityField, nil)
			ns.SetFieldSub(node, pp.ppNodeInfoField, nil)
		}
	})
	ns.SubscribeState(pp.ActiveFlag.Or(pp.InactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); c != nil {
			if oldState.IsEmpty() {
				pp.connectedNode(c)
			}
			if newState.IsEmpty() {
				pp.disconnectedNode(c)
			}
		}
	})
	ns.SubscribeState(pp.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
		if !newState.IsEmpty() {
			pp.updatePriority(node)
		}
	})
	return pp
}
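
// A minimal wiring sketch (illustrative only; it assumes the nodestate and mclock
// APIs imported by this file, the flatPriority helper above, and made-up limit
// values):
//
//	setup := nodestate.NewSetup()
//	pps := NewPriorityPoolSetup(setup)
//	pps.Connect(setup.NewField("priority", reflect.TypeOf(flatPriority{})), setup.NewFlag("update"))
//	// ns is a NodeStateMachine built from the same setup
//	pp := NewPriorityPool(ns, pps, mclock.System{}, 100, time.Minute, 4)
//	pp.SetLimits(10, 1000)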

// RequestCapacity checks whether changing the capacity of a node to the given target
// is possible (bias is applied in favor of other active nodes if the target is higher
// than the current capacity).
// If setCap is true then it also performs the change if possible. The function returns
// the minimum priority needed to do the change and whether it is currently allowed.
// If setCap and allowed are both true then the caller can assume that the change was
// successful.
// Note: priorityField should always be set before calling RequestCapacity. If setCap
// is false then both InactiveFlag and ActiveFlag can be unset and they are not changed
// by this function call either.
// Note 2: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias time.Duration, setCap bool) (minPriority int64, allowed bool) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	if targetCap < pp.minCap {
		targetCap = pp.minCap
	}
	c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo)
	if c == nil {
		log.Error("RequestCapacity called for unknown node", "id", node.ID())
		return math.MaxInt64, false
	}
	var priority int64
	if targetCap > c.capacity {
		priority = c.nodePriority.EstMinPriority(pp.clock.Now()+mclock.AbsTime(bias), targetCap, false)
	} else {
		priority = c.nodePriority.Priority(pp.clock.Now(), targetCap)
	}
	pp.markForChange(c)
	pp.setCapacity(c, targetCap)
	c.forced = true
	pp.activeQueue.Remove(c.activeIndex)
	pp.inactiveQueue.Remove(c.inactiveIndex)
	pp.activeQueue.Push(c)
	minPriority = pp.enforceLimits()
	// if capacity update is possible now then minPriority == math.MinInt64
	// if it is not possible at all then minPriority == math.MaxInt64
	allowed = priority > minPriority
	updates = pp.finalizeChanges(setCap && allowed)
	return
}
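
// An illustrative call, as a sketch (the target capacity and the operation wrapper
// around it are assumptions): with setCap true, a true "allowed" result means the
// capacity change has already been applied.
//
//	pp.ns.Operation(func() {
//		if _, allowed := pp.RequestCapacity(node, 500, pp.activeBias, true); allowed {
//			// node is now active with a capacity of 500
//		}
//	})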

// SetLimits sets the maximum number and total capacity of simultaneously active nodes
func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.ns.Operation(func() { pp.updateFlags(updates) })
	}()

	inc := (maxCount > pp.maxCount) || (maxCap > pp.maxCap)
	dec := (maxCount < pp.maxCount) || (maxCap < pp.maxCap)
	pp.maxCount, pp.maxCap = maxCount, maxCap
	if dec {
		pp.enforceLimits()
		updates = pp.finalizeChanges(true)
	}
	if inc {
		updates = pp.tryActivate()
	}
}
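
// For instance (illustrative numbers): shrinking the limits with pp.SetLimits(5, 500)
// makes enforceLimits shed capacity from the lowest priority active nodes, while
// raising them with pp.SetLimits(20, 2000) lets tryActivate promote the best
// inactive nodes immediately.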

// SetActiveBias sets the bias applied when trying to activate inactive nodes
func (pp *PriorityPool) SetActiveBias(bias time.Duration) {
	pp.lock.Lock()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		// apply the resulting flag/capacity updates outside the mutex,
		// mirroring the pattern used by SetLimits
		pp.ns.Operation(func() { pp.updateFlags(updates) })
	}()

	pp.activeBias = bias
	updates = pp.tryActivate()
}

// Active returns the number and total capacity of currently active nodes
func (pp *PriorityPool) Active() (uint64, uint64) {
	pp.lock.Lock()
	defer pp.lock.Unlock()

	return pp.activeCount, pp.activeCap
}

// inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue
func inactiveSetIndex(a interface{}, index int) {
	a.(*ppNodeInfo).inactiveIndex = index
}

// activeSetIndex callback updates ppNodeInfo item index in activeQueue
func activeSetIndex(a interface{}, index int) {
	a.(*ppNodeInfo).activeIndex = index
}

// invertPriority inverts a priority value. The active queue uses inverted priorities
// because the node on the top is the first to be deactivated. math.MinInt64 is mapped
// to math.MaxInt64 explicitly because its negation would overflow int64.
func invertPriority(p int64) int64 {
	if p == math.MinInt64 {
		return math.MaxInt64
	}
	return -p
}

// activePriority callback returns actual priority of ppNodeInfo item in activeQueue
func activePriority(a interface{}, now mclock.AbsTime) int64 {
	c := a.(*ppNodeInfo)
	if c.forced {
		return math.MinInt64
	}
	if c.bias == 0 {
		return invertPriority(c.nodePriority.Priority(now, c.capacity))
	}
	return invertPriority(c.nodePriority.EstMinPriority(now+mclock.AbsTime(c.bias), c.capacity, true))
}

// activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue
func activeMaxPriority(a interface{}, until mclock.AbsTime) int64 {
	c := a.(*ppNodeInfo)
	if c.forced {
		return math.MinInt64
	}
	return invertPriority(c.nodePriority.EstMinPriority(until+mclock.AbsTime(c.bias), c.capacity, false))
}

// inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue
func (pp *PriorityPool) inactivePriority(p *ppNodeInfo) int64 {
	return p.nodePriority.Priority(pp.clock.Now(), pp.minCap)
}

// connectedNode is called when a new node has been added to the pool (InactiveFlag set)
// Note: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) connectedNode(c *ppNodeInfo) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	if c.connected {
		return
	}
	c.connected = true
	pp.inactiveQueue.Push(c, pp.inactivePriority(c))
	updates = pp.tryActivate()
}

// disconnectedNode is called when a node has been removed from the pool (both InactiveFlag
// and ActiveFlag reset)
// Note: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	if !c.connected {
		return
	}
	c.connected = false
	pp.activeQueue.Remove(c.activeIndex)
	pp.inactiveQueue.Remove(c.inactiveIndex)
	if c.capacity != 0 {
		pp.setCapacity(c, 0)
		updates = pp.tryActivate()
	}
}

// markForChange internally puts a node in a temporary state that can either be reverted
// or confirmed later. This temporary state allows changing the capacity of a node and
// moving it between the active and inactive queues. ActiveFlag/InactiveFlag and
// CapacityField are not changed while the changes are still temporary.
func (pp *PriorityPool) markForChange(c *ppNodeInfo) {
	if c.changed {
		return
	}
	c.changed = true
	c.origCap = c.capacity
	pp.changed = append(pp.changed, c)
}

// setCapacity changes the capacity of a node and adjusts activeCap and activeCount
// accordingly. Note that this change is performed in the temporary state so it should
// be called after markForChange and before finalizeChanges.
func (pp *PriorityPool) setCapacity(n *ppNodeInfo, cap uint64) {
	// uint64 wraparound makes the sum correct for decreases as well
	pp.activeCap += cap - n.capacity
	if n.capacity == 0 {
		pp.activeCount++
	}
	if cap == 0 {
		pp.activeCount--
	}
	n.capacity = cap
}
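
// For example (illustrative numbers): with activeCount == 3 and activeCap == 900,
// setting a node that holds 300 down to 0 deactivates it, leaving activeCount == 2
// and activeCap == 600; raising a zero-capacity node to 100 would instead increment
// activeCount and grow activeCap by 100.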

// enforceLimits enforces active node count and total capacity limits. It returns the
// lowest active node priority. Note that this function is performed on the temporary
// internal state.
func (pp *PriorityPool) enforceLimits() int64 {
	if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount {
		return math.MinInt64
	}
	var maxActivePriority int64
	pp.activeQueue.MultiPop(func(data interface{}, priority int64) bool {
		c := data.(*ppNodeInfo)
		pp.markForChange(c)
		maxActivePriority = priority
		if c.capacity == pp.minCap {
			pp.setCapacity(c, 0)
		} else {
			sub := c.capacity / pp.capacityStepDiv
			if c.capacity-sub < pp.minCap {
				sub = c.capacity - pp.minCap
			}
			pp.setCapacity(c, c.capacity-sub)
			pp.activeQueue.Push(c)
		}
		return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount
	})
	return invertPriority(maxActivePriority)
}
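
// A worked example of the reduction step (illustrative numbers): with minCap == 100
// and capacityStepDiv == 2, a node at capacity 300 is first cut by 300/2 == 150 down
// to 150; on the next pass the half step would undershoot minCap, so the node lands
// exactly on 100; only if the limits are still exceeded once it sits at minCap is it
// deactivated to zero capacity.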

// finalizeChanges either commits or reverts temporary changes. The necessary capacity
// field and corresponding flag updates are not performed here but returned in a list
// because they should be performed while the mutex is not held.
func (pp *PriorityPool) finalizeChanges(commit bool) (updates []capUpdate) {
	for _, c := range pp.changed {
		// always remove and push back in order to update biased/forced priority
		pp.activeQueue.Remove(c.activeIndex)
		pp.inactiveQueue.Remove(c.inactiveIndex)
		c.bias = 0
		c.forced = false
		c.changed = false
		if !commit {
			pp.setCapacity(c, c.origCap)
		}
		if c.connected {
			if c.capacity != 0 {
				pp.activeQueue.Push(c)
			} else {
				pp.inactiveQueue.Push(c, pp.inactivePriority(c))
			}
			if c.capacity != c.origCap && commit {
				updates = append(updates, capUpdate{c.node, c.origCap, c.capacity})
			}
		}
		c.origCap = 0
	}
	pp.changed = nil
	return
}

// capUpdate describes a CapacityField and ActiveFlag/InactiveFlag update
type capUpdate struct {
	node           *enode.Node
	oldCap, newCap uint64
}

// updateFlags performs CapacityField and ActiveFlag/InactiveFlag updates while the
// pool mutex is not held
// Note: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) updateFlags(updates []capUpdate) {
	for _, f := range updates {
		if f.oldCap == 0 {
			pp.ns.SetStateSub(f.node, pp.ActiveFlag, pp.InactiveFlag, 0)
		}
		if f.newCap == 0 {
			pp.ns.SetStateSub(f.node, pp.InactiveFlag, pp.ActiveFlag, 0)
			pp.ns.SetFieldSub(f.node, pp.CapacityField, nil)
		} else {
			pp.ns.SetFieldSub(f.node, pp.CapacityField, f.newCap)
		}
	}
}

// tryActivate tries to activate inactive nodes if possible. It is called with the
// pool mutex held and returns the resulting capacity updates, which should be
// applied by the caller after releasing the mutex.
func (pp *PriorityPool) tryActivate() []capUpdate {
	var commit bool
	for pp.inactiveQueue.Size() > 0 {
		c := pp.inactiveQueue.PopItem().(*ppNodeInfo)
		pp.markForChange(c)
		pp.setCapacity(c, pp.minCap)
		c.bias = pp.activeBias
		pp.activeQueue.Push(c)
		pp.enforceLimits()
		if c.capacity > 0 {
			commit = true
		} else {
			break
		}
	}
	return pp.finalizeChanges(commit)
}

// updatePriority gets the current priority value of the given node from the nodePriority
// interface and performs the necessary changes. It is triggered by updateFlag.
// Note: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) updatePriority(node *enode.Node) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo)
	if c == nil || !c.connected {
		return
	}
	pp.activeQueue.Remove(c.activeIndex)
	pp.inactiveQueue.Remove(c.inactiveIndex)
	if c.capacity != 0 {
		pp.activeQueue.Push(c)
	} else {
		pp.inactiveQueue.Push(c, pp.inactivePriority(c))
	}
	updates = pp.tryActivate()
}