github.com/aswedchain/aswed@v1.0.1/les/lespay/server/prioritypool.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"math"
	"reflect"
	"sync"
	"time"

	"github.com/aswedchain/aswed/common/mclock"
	"github.com/aswedchain/aswed/common/prque"
	"github.com/aswedchain/aswed/log"
	"github.com/aswedchain/aswed/p2p/enode"
	"github.com/aswedchain/aswed/p2p/nodestate"
)

const (
	lazyQueueRefresh = time.Second * 10 // refresh period of the active queue
)

// PriorityPoolSetup contains node state flags and fields used by PriorityPool
// Note: ActiveFlag and InactiveFlag can be controlled both externally and by the pool,
// see PriorityPool description for details.
type PriorityPoolSetup struct {
	// controlled by PriorityPool
	ActiveFlag, InactiveFlag       nodestate.Flags
	CapacityField, ppNodeInfoField nodestate.Field
	// external connections
	updateFlag    nodestate.Flags
	priorityField nodestate.Field
}

// NewPriorityPoolSetup creates a new PriorityPoolSetup and initializes the fields
// and flags controlled by PriorityPool
func NewPriorityPoolSetup(setup *nodestate.Setup) PriorityPoolSetup {
	return PriorityPoolSetup{
		ActiveFlag:      setup.NewFlag("active"),
		InactiveFlag:    setup.NewFlag("inactive"),
		CapacityField:   setup.NewField("capacity", reflect.TypeOf(uint64(0))),
		ppNodeInfoField: setup.NewField("ppNodeInfo", reflect.TypeOf(&ppNodeInfo{})),
	}
}

// Connect sets the fields and flags used by PriorityPool as an input
func (pps *PriorityPoolSetup) Connect(priorityField nodestate.Field, updateFlag nodestate.Flags) {
	pps.priorityField = priorityField // should implement nodePriority
	pps.updateFlag = updateFlag       // triggers an immediate priority update
}

// PriorityPool handles a set of nodes where each node has a capacity (a scalar value)
// and a priority (which can change over time and can also depend on the capacity).
// A node is active if it has at least the required minimum capacity while inactive
// nodes have 0 capacity (values between 0 and the minimum are not allowed).
// The pool ensures that the number and total capacity of all active nodes are limited
// and the highest priority nodes are active at all times (limits can be changed
// during operation with immediate effect).
//
// When activating clients, a priority bias is applied in favor of the already active
// nodes in order to avoid nodes quickly alternating between active and inactive states
// when their priorities are close to each other. The bias is specified in terms of
// duration (time) because priorities are expected to usually get lower over time and
// therefore a future minimum prediction (see EstMinPriority) should decrease
// monotonically with the specified time parameter.
// This time bias can be interpreted as the minimum expected active time at the given
// capacity (if the threshold priority stays the same).
//
// Nodes in the pool always have either InactiveFlag or ActiveFlag set. A new node is
// added to the pool by externally setting InactiveFlag. PriorityPool can switch a node
// between InactiveFlag and ActiveFlag at any time. Nodes can be removed from the pool
// by externally resetting both flags. ActiveFlag should not be set externally.
//
// The highest priority nodes in "inactive" state are moved to "active" state as soon as
// the minimum capacity can be granted for them. The capacity of lower priority active
// nodes is reduced or they are demoted to "inactive" state if their priority is
// insufficient even at minimal capacity.
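//
// A minimal wiring sketch (hedged: priorityField, updateFlag, clock and the
// nodestate constructor arguments are assumptions shown for illustration only,
// not part of this file):
//
//	setup := &nodestate.Setup{}
//	ppSetup := NewPriorityPoolSetup(setup)
//	ppSetup.Connect(priorityField, updateFlag) // priorityField values implement nodePriority
//	ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup)
//	pp := NewPriorityPool(ns, ppSetup, clock, 100, time.Second*10, 2)
//	pp.SetLimits(10, 10000) // at most 10 active nodes with 10000 total capacity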
type PriorityPool struct {
	PriorityPoolSetup
	ns                     *nodestate.NodeStateMachine
	clock                  mclock.Clock
	lock                   sync.Mutex
	activeQueue            *prque.LazyQueue
	inactiveQueue          *prque.Prque
	changed                []*ppNodeInfo
	activeCount, activeCap uint64
	maxCount, maxCap       uint64
	minCap                 uint64
	activeBias             time.Duration
	capacityStepDiv        uint64
}

// nodePriority interface provides current and estimated future priorities on demand
type nodePriority interface {
	// Priority should return the current priority of the node (higher is better)
	Priority(now mclock.AbsTime, cap uint64) int64
	// EstMinPriority should return a lower estimate for the minimum of the node priority
	// value starting from the current moment until the given time. If the priority goes
	// under the returned estimate before the specified moment then it is the caller's
	// responsibility to signal with updateFlag.
	EstMinPriority(until mclock.AbsTime, cap uint64, update bool) int64
}
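
// A minimal nodePriority implementation sketch (hypothetical, for illustration
// only; real implementations are supplied externally through priorityField):
//
//	type staticPriority int64
//
//	// Priority returns the same value regardless of time and capacity.
//	func (p staticPriority) Priority(now mclock.AbsTime, cap uint64) int64 {
//		return int64(p)
//	}
//
//	// EstMinPriority is also constant because a static priority never decreases.
//	func (p staticPriority) EstMinPriority(until mclock.AbsTime, cap uint64, update bool) int64 {
//		return int64(p)
//	}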

// ppNodeInfo is the internal node descriptor of PriorityPool
type ppNodeInfo struct {
	nodePriority               nodePriority
	node                       *enode.Node
	connected                  bool
	capacity, origCap          uint64
	bias                       time.Duration
	forced, changed            bool
	activeIndex, inactiveIndex int
}

// NewPriorityPool creates a new PriorityPool
func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *PriorityPool {
	pp := &PriorityPool{
		ns:                ns,
		PriorityPoolSetup: setup,
		clock:             clock,
		activeQueue:       prque.NewLazyQueue(activeSetIndex, activePriority, activeMaxPriority, clock, lazyQueueRefresh),
		inactiveQueue:     prque.New(inactiveSetIndex),
		minCap:            minCap,
		activeBias:        activeBias,
		capacityStepDiv:   capacityStepDiv,
	}

	ns.SubscribeField(pp.priorityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
		if newValue != nil {
			c := &ppNodeInfo{
				node:          node,
				nodePriority:  newValue.(nodePriority),
				activeIndex:   -1,
				inactiveIndex: -1,
			}
			ns.SetFieldSub(node, pp.ppNodeInfoField, c)
		} else {
			ns.SetStateSub(node, nodestate.Flags{}, pp.ActiveFlag.Or(pp.InactiveFlag), 0)
			if n, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); n != nil {
				pp.disconnectedNode(n)
			}
			ns.SetFieldSub(node, pp.CapacityField, nil)
			ns.SetFieldSub(node, pp.ppNodeInfoField, nil)
		}
	})
	ns.SubscribeState(pp.ActiveFlag.Or(pp.InactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); c != nil {
			if oldState.IsEmpty() {
				pp.connectedNode(c)
			}
			if newState.IsEmpty() {
				pp.disconnectedNode(c)
			}
		}
	})
	ns.SubscribeState(pp.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
		if !newState.IsEmpty() {
			pp.updatePriority(node)
		}
	})
	return pp
}

// RequestCapacity checks whether changing the capacity of a node to the given target
// is possible (bias is applied in favor of other active nodes if the target is higher
// than the current capacity).
// If setCap is true then it also performs the change if possible. The function returns
// the minimum priority needed to do the change and whether it is currently allowed.
// If setCap and allowed are both true then the caller can assume that the change was
// successful.
// Note: priorityField should always be set before calling RequestCapacity. If setCap
// is false then both InactiveFlag and ActiveFlag can be unset and they are not changed
// by this function call either.
// Note 2: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias time.Duration, setCap bool) (minPriority int64, allowed bool) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	if targetCap < pp.minCap {
		targetCap = pp.minCap
	}
	c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo)
	if c == nil {
		log.Error("RequestCapacity called for unknown node", "id", node.ID())
		return math.MaxInt64, false
	}
	var priority int64
	if targetCap > c.capacity {
		priority = c.nodePriority.EstMinPriority(pp.clock.Now()+mclock.AbsTime(bias), targetCap, false)
	} else {
		priority = c.nodePriority.Priority(pp.clock.Now(), targetCap)
	}
	pp.markForChange(c)
	pp.setCapacity(c, targetCap)
	c.forced = true
	pp.activeQueue.Remove(c.activeIndex)
	pp.inactiveQueue.Remove(c.inactiveIndex)
	pp.activeQueue.Push(c)
	minPriority = pp.enforceLimits()
	// if capacity update is possible now then minPriority == math.MinInt64
	// if it is not possible at all then minPriority == math.MaxInt64
	allowed = priority > minPriority
	updates = pp.finalizeChanges(setCap && allowed)
	return
}
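
// A hedged usage sketch for RequestCapacity (assumes the node is already in the
// pool and that the call happens inside a NodeStateMachine operation; the
// concrete numbers are for illustration only):
//
//	ns.Operation(func() {
//		// try to raise the node's capacity to 200 with a 10s activation bias
//		if _, allowed := pp.RequestCapacity(node, 200, time.Second*10, true); allowed {
//			// the change has been committed and CapacityField updated
//		}
//	})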

// SetLimits sets the maximum number and total capacity of simultaneously active nodes
func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.ns.Operation(func() { pp.updateFlags(updates) })
	}()

	inc := (maxCount > pp.maxCount) || (maxCap > pp.maxCap)
	dec := (maxCount < pp.maxCount) || (maxCap < pp.maxCap)
	pp.maxCount, pp.maxCap = maxCount, maxCap
	if dec {
		pp.enforceLimits()
		updates = pp.finalizeChanges(true)
	}
	if inc {
		updates = pp.tryActivate()
	}
}

// SetActiveBias sets the bias applied when trying to activate inactive nodes
func (pp *PriorityPool) SetActiveBias(bias time.Duration) {
	pp.lock.Lock()
	pp.activeBias = bias
	updates := pp.tryActivate()
	pp.lock.Unlock()
	// apply the resulting flag and capacity updates outside the mutex, mirroring
	// the pattern used by SetLimits; discarding them would leave
	// ActiveFlag/InactiveFlag and CapacityField out of sync with the pool state
	pp.ns.Operation(func() { pp.updateFlags(updates) })
}

// ActiveCapacity returns the total capacity of currently active nodes
func (pp *PriorityPool) ActiveCapacity() uint64 {
	pp.lock.Lock()
	defer pp.lock.Unlock()

	return pp.activeCap
}

// inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue
func inactiveSetIndex(a interface{}, index int) {
	a.(*ppNodeInfo).inactiveIndex = index
}

// activeSetIndex callback updates ppNodeInfo item index in activeQueue
func activeSetIndex(a interface{}, index int) {
	a.(*ppNodeInfo).activeIndex = index
}

// invertPriority inverts a priority value. The active queue uses inverted priorities
// because the node on the top is the first to be deactivated.
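// math.MinInt64 is special-cased because negating it would overflow int64.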
func invertPriority(p int64) int64 {
	if p == math.MinInt64 {
		return math.MaxInt64
	}
	return -p
}

// activePriority callback returns actual priority of ppNodeInfo item in activeQueue
func activePriority(a interface{}, now mclock.AbsTime) int64 {
	c := a.(*ppNodeInfo)
	if c.forced {
		return math.MinInt64
	}
	if c.bias == 0 {
		return invertPriority(c.nodePriority.Priority(now, c.capacity))
	}
	return invertPriority(c.nodePriority.EstMinPriority(now+mclock.AbsTime(c.bias), c.capacity, true))
}

// activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue
func activeMaxPriority(a interface{}, until mclock.AbsTime) int64 {
	c := a.(*ppNodeInfo)
	if c.forced {
		return math.MinInt64
	}
	return invertPriority(c.nodePriority.EstMinPriority(until+mclock.AbsTime(c.bias), c.capacity, false))
}

// inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue
func (pp *PriorityPool) inactivePriority(p *ppNodeInfo) int64 {
	return p.nodePriority.Priority(pp.clock.Now(), pp.minCap)
}

// connectedNode is called when a new node has been added to the pool (InactiveFlag set)
// Note: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) connectedNode(c *ppNodeInfo) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	if c.connected {
		return
	}
	c.connected = true
	pp.inactiveQueue.Push(c, pp.inactivePriority(c))
	updates = pp.tryActivate()
}

// disconnectedNode is called when a node has been removed from the pool (both InactiveFlag
// and ActiveFlag reset)
// Note: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	if !c.connected {
		return
	}
	c.connected = false
	pp.activeQueue.Remove(c.activeIndex)
	pp.inactiveQueue.Remove(c.inactiveIndex)
	if c.capacity != 0 {
		pp.setCapacity(c, 0)
		updates = pp.tryActivate()
	}
}

// markForChange internally puts a node in a temporary state that can either be reverted
// or confirmed later. This temporary state allows changing the capacity of a node and
// moving it between the active and inactive queue. ActiveFlag/InactiveFlag and
// CapacityField are not changed while the changes are still temporary.
func (pp *PriorityPool) markForChange(c *ppNodeInfo) {
	if c.changed {
		return
	}
	c.changed = true
	c.origCap = c.capacity
	pp.changed = append(pp.changed, c)
}

// setCapacity changes the capacity of a node and adjusts activeCap and activeCount
// accordingly. Note that this change is performed in the temporary state so it should
// be called after markForChange and before finalizeChanges.
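// For example, with minCap = 100, raising an inactive node (capacity 0) to 100
// increments activeCount and adds 100 to activeCap; lowering it back to 0
// reverses both adjustments.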
func (pp *PriorityPool) setCapacity(n *ppNodeInfo, cap uint64) {
	pp.activeCap += cap - n.capacity
	if n.capacity == 0 {
		pp.activeCount++
	}
	if cap == 0 {
		pp.activeCount--
	}
	n.capacity = cap
}

// enforceLimits enforces active node count and total capacity limits. It returns the
// lowest active node priority. Note that this function is performed on the temporary
// internal state.
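// Capacity is shed from the lowest priority active nodes first. For example, with
// capacityStepDiv = 2 and minCap = 100, a node at capacity 400 that stays lowest
// priority is cut to 200, then to 100, and is finally deactivated (capacity 0) if
// the limits are still exceeded.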
func (pp *PriorityPool) enforceLimits() int64 {
	if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount {
		return math.MinInt64
	}
	var maxActivePriority int64
	pp.activeQueue.MultiPop(func(data interface{}, priority int64) bool {
		c := data.(*ppNodeInfo)
		pp.markForChange(c)
		maxActivePriority = priority
		if c.capacity == pp.minCap {
			pp.setCapacity(c, 0)
		} else {
			sub := c.capacity / pp.capacityStepDiv
			if c.capacity-sub < pp.minCap {
				sub = c.capacity - pp.minCap
			}
			pp.setCapacity(c, c.capacity-sub)
			pp.activeQueue.Push(c)
		}
		return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount
	})
	return invertPriority(maxActivePriority)
}

// finalizeChanges either commits or reverts temporary changes. The necessary capacity
// field and corresponding flag updates are not performed here but returned in a list
// because they should be performed while the mutex is not held.
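// For example, a rejected RequestCapacity call raises the node's capacity only in
// the temporary state and then calls finalizeChanges(false), which restores origCap
// and re-inserts the node into the appropriate queue without emitting any updates.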
func (pp *PriorityPool) finalizeChanges(commit bool) (updates []capUpdate) {
	for _, c := range pp.changed {
		// always remove and push back in order to update biased/forced priority
		pp.activeQueue.Remove(c.activeIndex)
		pp.inactiveQueue.Remove(c.inactiveIndex)
		c.bias = 0
		c.forced = false
		c.changed = false
		if !commit {
			pp.setCapacity(c, c.origCap)
		}
		if c.connected {
			if c.capacity != 0 {
				pp.activeQueue.Push(c)
			} else {
				pp.inactiveQueue.Push(c, pp.inactivePriority(c))
			}
			if c.capacity != c.origCap && commit {
				updates = append(updates, capUpdate{c.node, c.origCap, c.capacity})
			}
		}
		c.origCap = 0
	}
	pp.changed = nil
	return
}

// capUpdate describes a CapacityField and ActiveFlag/InactiveFlag update
type capUpdate struct {
	node           *enode.Node
	oldCap, newCap uint64
}

// updateFlags performs CapacityField and ActiveFlag/InactiveFlag updates while the
// pool mutex is not held
// Note: this function should run inside a NodeStateMachine operation
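// An oldCap of zero means the node has just been activated (InactiveFlag is
// replaced by ActiveFlag); a newCap of zero means it has been deactivated
// (ActiveFlag is replaced by InactiveFlag) and its CapacityField is removed.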
func (pp *PriorityPool) updateFlags(updates []capUpdate) {
	for _, f := range updates {
		if f.oldCap == 0 {
			pp.ns.SetStateSub(f.node, pp.ActiveFlag, pp.InactiveFlag, 0)
		}
		if f.newCap == 0 {
			pp.ns.SetStateSub(f.node, pp.InactiveFlag, pp.ActiveFlag, 0)
			pp.ns.SetFieldSub(f.node, pp.CapacityField, nil)
		} else {
			pp.ns.SetFieldSub(f.node, pp.CapacityField, f.newCap)
		}
	}
}

// tryActivate tries to activate inactive nodes if possible
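// It repeatedly pops the highest priority inactive node, grants it minCap with the
// activation bias applied and re-runs enforceLimits. The loop stops at the first
// node that cannot keep a non-zero capacity; the accumulated temporary changes are
// committed if at least one activation succeeded and reverted otherwise.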
func (pp *PriorityPool) tryActivate() []capUpdate {
	var commit bool
	for pp.inactiveQueue.Size() > 0 {
		c := pp.inactiveQueue.PopItem().(*ppNodeInfo)
		pp.markForChange(c)
		pp.setCapacity(c, pp.minCap)
		c.bias = pp.activeBias
		pp.activeQueue.Push(c)
		pp.enforceLimits()
		if c.capacity > 0 {
			commit = true
		} else {
			break
		}
	}
	return pp.finalizeChanges(commit)
}

// updatePriority gets the current priority value of the given node from the nodePriority
// interface and performs the necessary changes. It is triggered by updateFlag.
// Note: this function should run inside a NodeStateMachine operation
func (pp *PriorityPool) updatePriority(node *enode.Node) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo)
	if c == nil || !c.connected {
		return
	}
	pp.activeQueue.Remove(c.activeIndex)
	pp.inactiveQueue.Remove(c.inactiveIndex)
	if c.capacity != 0 {
		pp.activeQueue.Push(c)
	} else {
		pp.inactiveQueue.Push(c, pp.inactivePriority(c))
	}
	updates = pp.tryActivate()
}