github.com/theQRL/go-zond@v0.1.1/les/flowcontrol/manager.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package flowcontrol

import (
	"fmt"
	"math"
	"sync"
	"time"

	"github.com/theQRL/go-zond/common/mclock"
	"github.com/theQRL/go-zond/common/prque"
)

// cmNodeFields are ClientNode fields used by the client manager
// Note: these fields are locked by the client manager's mutex
type cmNodeFields struct {
	corrBufValue   int64 // buffer value adjusted with the extra recharge amount
	rcLastIntValue int64 // past recharge integrator value when corrBufValue was last updated
	rcFullIntValue int64 // future recharge integrator value when corrBufValue will reach maximum
	queueIndex     int   // position in the recharge queue (-1 if not queued)
}

// FixedPointMultiplier is applied to the recharge integrator and the recharge curve.
//
// Note: fixed point arithmetic is required for the integrator because it is a
// constantly increasing value that can wrap around the int64 limit (a behavior
// that is also supported by the priority queue). A floating point value would
// gradually lose precision in this application.
// The recharge curve and all recharge values are encoded as fixed point because
// sumRecharge is frequently updated by adding or subtracting individual recharge
// values and perfect precision is required.
const FixedPointMultiplier = 1000000
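
// Illustrative note (not from the original source): with this scaling, recharge
// and cost rates can be read as millionths of a serving thread. Hypothetical
// numbers, following the "one thread = 1000000" convention described at
// NewClientManager below:
//
//	// sumRecharge == 1000000  -> roughly one serving thread busy on average
//	// sumRecharge == 2500000  -> roughly 2.5 serving threads busy on average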

var (
	capacityDropFactor          = 0.1
	capacityRaiseTC             = 1 / (3 * float64(time.Hour)) // time constant for raising the capacity factor
	capacityRaiseThresholdRatio = 1.125                        // total/connected capacity ratio threshold for raising the capacity factor
)
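
// Rough arithmetic sketch (illustrative only): updateTotalCapacity adds
// capacityRaiseTC*dt to logTotalCap, with dt measured in nanoseconds. Since
// capacityRaiseTC is 1/(3h), three hours of uninterrupted raising add 1.0 in
// log space, i.e. the total capacity grows by a factor of e (about 2.72),
// subject to the raise limit and the maximum set by SetCapacityLimits.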

// ClientManager controls the capacity assigned to the clients of a server.
// Since ServerParams guarantees a safe lower estimate for processable requests
// even in case of all clients being active, ClientManager calculates a
// corrected buffer value and usually allows a higher remaining buffer value
// to be returned with each reply.
type ClientManager struct {
	clock mclock.Clock
	lock  sync.Mutex
	stop  chan chan struct{}

	curve                                      PieceWiseLinear
	sumRecharge, totalRecharge, totalConnected uint64
	logTotalCap, totalCapacity                 float64
	logTotalCapRaiseLimit                      float64
	minLogTotalCap, maxLogTotalCap             float64
	capacityRaiseThreshold                     uint64
	capLastUpdate                              mclock.AbsTime
	totalCapacityCh                            chan uint64

	// the recharge integrator increases at every moment at a rate of
	// (totalRecharge / sumRecharge)*FixedPointMultiplier, or 0 if sumRecharge==0
	rcLastUpdate   mclock.AbsTime // last time the recharge integrator was updated
	rcLastIntValue int64          // last updated value of the recharge integrator
	priorityOffset int64          // offset for prque priority values; ensures that all priorities stay in the int64 range
	// recharge queue is a priority queue with currently recharging client nodes
	// as elements. The priority value is rcFullIntValue, which makes it quick to
	// determine which client will finish recharging first.
	rcQueue *prque.Prque[int64, *ClientNode]
}

// NewClientManager returns a new client manager.
// Client manager enhances flow control performance by allowing client buffers
// to recharge quicker than the minimum guaranteed recharge rate if possible.
// The sum of all minimum recharge rates (sumRecharge) is updated each time
// a client starts or finishes buffer recharging. Then an adjusted total
// recharge rate is calculated using a piecewise linear recharge curve:
//
// totalRecharge = curve(sumRecharge)
// (totalRecharge >= sumRecharge is enforced)
//
// Then the "bonus" buffer recharge is distributed between currently recharging
// clients proportionally to their minimum recharge rates.
//
// Note: total recharge is proportional to the average number of parallel running
// serving threads. A recharge value of 1000000 corresponds to one thread on average.
// The maximum number of allowed serving threads should always be considerably
// higher than the targeted average number.
//
// Note 2: although it is possible to specify a curve that already allows the total
// target recharge at zero sumRecharge, it makes sense to add a linear ramp starting
// from zero so that a single low-priority client cannot use up the entire server
// capacity, ensuring quick availability for others at any moment.
func NewClientManager(curve PieceWiseLinear, clock mclock.Clock) *ClientManager {
	cm := &ClientManager{
		clock:         clock,
		rcQueue:       prque.New[int64, *ClientNode](func(a *ClientNode, i int) { a.queueIndex = i }),
		capLastUpdate: clock.Now(),
		stop:          make(chan chan struct{}),
	}
	if curve != nil {
		cm.SetRechargeCurve(curve)
	}
	go func() {
		// regularly recalculate and update total capacity
		for {
			select {
			case <-time.After(time.Minute):
				cm.lock.Lock()
				cm.updateTotalCapacity(cm.clock.Now(), true)
				cm.lock.Unlock()
			case stop := <-cm.stop:
				close(stop)
				return
			}
		}
	}()
	return cm
}
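
// A minimal construction sketch from an importing package (the curve values here
// are hypothetical; real servers derive the curve from their cost model, and the
// system clock is assumed to be mclock.System as used elsewhere in the codebase):
//
//	curve := flowcontrol.PieceWiseLinear{{0, 0}, {250000, 1000000}, {1000000, 1000000}}
//	cm := flowcontrol.NewClientManager(curve, &mclock.System{})
//	defer cm.Stop()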

// Stop stops the client manager
func (cm *ClientManager) Stop() {
	stop := make(chan struct{})
	cm.stop <- stop
	<-stop
}

// SetRechargeCurve updates the recharge curve
func (cm *ClientManager) SetRechargeCurve(curve PieceWiseLinear) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	now := cm.clock.Now()
	cm.updateRecharge(now)
	cm.curve = curve
	if len(curve) > 0 {
		cm.totalRecharge = curve[len(curve)-1].Y
	} else {
		cm.totalRecharge = 0
	}
}

// SetCapacityLimits sets a threshold value used for raising capFactor.
// If either the difference between the total allowed and the connected capacity
// is less than this threshold, or their ratio is less than
// capacityRaiseThresholdRatio, then capFactor is allowed to rise slowly.
func (cm *ClientManager) SetCapacityLimits(min, max, raiseThreshold uint64) {
	if min < 1 {
		min = 1
	}
	cm.minLogTotalCap = math.Log(float64(min))
	if max < 1 {
		max = 1
	}
	cm.maxLogTotalCap = math.Log(float64(max))
	cm.logTotalCap = cm.maxLogTotalCap
	cm.capacityRaiseThreshold = raiseThreshold
	cm.refreshCapacity()
}

// connect should be called when a client is connected, before passing it to any
// other ClientManager function
func (cm *ClientManager) connect(node *ClientNode) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	now := cm.clock.Now()
	cm.updateRecharge(now)
	node.corrBufValue = int64(node.params.BufLimit)
	node.rcLastIntValue = cm.rcLastIntValue
	node.queueIndex = -1
	cm.updateTotalCapacity(now, true)
	cm.totalConnected += node.params.MinRecharge
	cm.updateRaiseLimit()
}

// disconnect should be called when a client is disconnected
func (cm *ClientManager) disconnect(node *ClientNode) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	now := cm.clock.Now()
	cm.updateRecharge(now)
	cm.updateTotalCapacity(now, true)
	cm.totalConnected -= node.params.MinRecharge
	cm.updateRaiseLimit()
}

// accepted is called when a request with given maximum cost is accepted.
// It returns a priority indicator for the request which is used to determine placement
// in the serving queue. Older requests have higher priority by default. If the client
// is almost out of buffer, request priority is reduced.
func (cm *ClientManager) accepted(node *ClientNode, maxCost uint64, now mclock.AbsTime) (priority int64) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	cm.updateNodeRc(node, -int64(maxCost), &node.params, now)
	rcTime := (node.params.BufLimit - uint64(node.corrBufValue)) * FixedPointMultiplier / node.params.MinRecharge
	return -int64(now) - int64(rcTime)
}
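
// Worked example (hypothetical numbers): with BufLimit = 300000, a corrected
// buffer of 100000 left after charging maxCost, and MinRecharge = 50000, the
// missing 200000 buffer units take 200000*FixedPointMultiplier/50000 = 4,000,000
// integrator units to recharge at the minimum rate, so the returned priority is
// -(now + 4,000,000); clients further from a full buffer therefore get a lower
// priority.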

// processed updates the client buffer according to actual request cost after
// serving has been finished.
//
// Note: processed should always be called for all accepted requests
func (cm *ClientManager) processed(node *ClientNode, maxCost, realCost uint64, now mclock.AbsTime) {
	if realCost > maxCost {
		realCost = maxCost
	}
	cm.updateBuffer(node, int64(maxCost-realCost), now)
}

// updateBuffer recalculates the corrected buffer value, adds the given value to it
// and updates the node's actual buffer value if possible
func (cm *ClientManager) updateBuffer(node *ClientNode, add int64, now mclock.AbsTime) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	cm.updateNodeRc(node, add, &node.params, now)
	if node.corrBufValue > node.bufValue {
		if node.log != nil {
			node.log.add(now, fmt.Sprintf("corrected  bv=%d  oldBv=%d", node.corrBufValue, node.bufValue))
		}
		node.bufValue = node.corrBufValue
	}
}

// updateParams updates the flow control parameters of a client node
func (cm *ClientManager) updateParams(node *ClientNode, params ServerParams, now mclock.AbsTime) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	cm.updateRecharge(now)
	cm.updateTotalCapacity(now, true)
	cm.totalConnected += params.MinRecharge - node.params.MinRecharge
	cm.updateRaiseLimit()
	cm.updateNodeRc(node, 0, &params, now)
}

// updateRaiseLimit recalculates the limit up to which logTotalCap can be raised
// when no client freeze events occur
func (cm *ClientManager) updateRaiseLimit() {
	if cm.capacityRaiseThreshold == 0 {
		cm.logTotalCapRaiseLimit = 0
		return
	}
	limit := float64(cm.totalConnected + cm.capacityRaiseThreshold)
	limit2 := float64(cm.totalConnected) * capacityRaiseThresholdRatio
	if limit2 > limit {
		limit = limit2
	}
	if limit < 1 {
		limit = 1
	}
	cm.logTotalCapRaiseLimit = math.Log(limit)
}
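
// Numeric sketch (hypothetical values): with totalConnected = 1,000,000 and
// capacityRaiseThreshold = 100,000, the absolute limit is 1,100,000 while the
// ratio-based limit is 1,000,000*1.125 = 1,125,000; the larger one wins, so
// logTotalCapRaiseLimit becomes ln(1,125,000).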

// updateRecharge updates the recharge integrator and checks the recharge queue
// for nodes with recently filled buffers
func (cm *ClientManager) updateRecharge(now mclock.AbsTime) {
	lastUpdate := cm.rcLastUpdate
	cm.rcLastUpdate = now
	// updating is done in multiple steps if node buffers are filled and sumRecharge
	// is decreased before the given target time
	for cm.sumRecharge > 0 {
		sumRecharge := cm.sumRecharge
		if sumRecharge > cm.totalRecharge {
			sumRecharge = cm.totalRecharge
		}
		bonusRatio := float64(1)
		v := cm.curve.ValueAt(sumRecharge)
		s := float64(sumRecharge)
		if v > s && s > 0 {
			bonusRatio = v / s
		}
		dt := now - lastUpdate
		// fetch the client that finishes first
		rcqNode := cm.rcQueue.PopItem() // if sumRecharge > 0 then the queue cannot be empty
		// check whether it has already finished
		dtNext := mclock.AbsTime(float64(rcqNode.rcFullIntValue-cm.rcLastIntValue) / bonusRatio)
		if dt < dtNext {
			// not finished yet, put it back, update integrator according
			// to current bonusRatio and return
			cm.addToQueue(rcqNode)
			cm.rcLastIntValue += int64(bonusRatio * float64(dt))
			return
		}
		lastUpdate += dtNext
		// finished recharging, update corrBufValue and sumRecharge if necessary and do next step
		if rcqNode.corrBufValue < int64(rcqNode.params.BufLimit) {
			rcqNode.corrBufValue = int64(rcqNode.params.BufLimit)
			cm.sumRecharge -= rcqNode.params.MinRecharge
		}
		cm.rcLastIntValue = rcqNode.rcFullIntValue
	}
}
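
// Bonus-ratio sketch (hypothetical numbers): if sumRecharge = 500,000 and the
// curve yields curve.ValueAt(500000) = 1,000,000, then bonusRatio = 2 and the
// integrator advances at twice the nominal rate, so every currently recharging
// client refills at roughly double its guaranteed MinRecharge rate.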

func (cm *ClientManager) addToQueue(node *ClientNode) {
	if cm.priorityOffset-node.rcFullIntValue < -0x4000000000000000 {
		cm.priorityOffset += 0x4000000000000000
		// recreate priority queue with new offset to avoid overflow; should happen very rarely
		newRcQueue := prque.New[int64, *ClientNode](func(a *ClientNode, i int) { a.queueIndex = i })
		for cm.rcQueue.Size() > 0 {
			n := cm.rcQueue.PopItem()
			newRcQueue.Push(n, cm.priorityOffset-n.rcFullIntValue)
		}
		cm.rcQueue = newRcQueue
	}
	cm.rcQueue.Push(node, cm.priorityOffset-node.rcFullIntValue)
}

// updateNodeRc updates a node's corrBufValue and adds an external correction value.
// It also adds or removes the rcQueue entry and updates ServerParams and sumRecharge if necessary.
func (cm *ClientManager) updateNodeRc(node *ClientNode, bvc int64, params *ServerParams, now mclock.AbsTime) {
	cm.updateRecharge(now)
	wasFull := true
	if node.corrBufValue != int64(node.params.BufLimit) {
		wasFull = false
		node.corrBufValue += (cm.rcLastIntValue - node.rcLastIntValue) * int64(node.params.MinRecharge) / FixedPointMultiplier
		if node.corrBufValue > int64(node.params.BufLimit) {
			node.corrBufValue = int64(node.params.BufLimit)
		}
		node.rcLastIntValue = cm.rcLastIntValue
	}
	node.corrBufValue += bvc
	diff := int64(params.BufLimit - node.params.BufLimit)
	if diff > 0 {
		node.corrBufValue += diff
	}
	isFull := false
	if node.corrBufValue >= int64(params.BufLimit) {
		node.corrBufValue = int64(params.BufLimit)
		isFull = true
	}
	if !wasFull {
		cm.sumRecharge -= node.params.MinRecharge
	}
	if params != &node.params {
		node.params = *params
	}
	if !isFull {
		cm.sumRecharge += node.params.MinRecharge
		if node.queueIndex != -1 {
			cm.rcQueue.Remove(node.queueIndex)
		}
		node.rcLastIntValue = cm.rcLastIntValue
		node.rcFullIntValue = cm.rcLastIntValue + (int64(node.params.BufLimit)-node.corrBufValue)*FixedPointMultiplier/int64(node.params.MinRecharge)
		cm.addToQueue(node)
	}
}
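
// Queue-position sketch (hypothetical numbers): a node with a 100,000 buffer
// deficit and MinRecharge = 25,000 gets
// rcFullIntValue = rcLastIntValue + 100000*FixedPointMultiplier/25000,
// i.e. it is queued to finish 4,000,000 integrator units after the current
// integrator value (sooner in wall time whenever bonusRatio > 1).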

// reduceTotalCapacity reduces the total capacity allowance in case of a client freeze event
func (cm *ClientManager) reduceTotalCapacity(frozenCap uint64) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	ratio := float64(1)
	if frozenCap < cm.totalConnected {
		ratio = float64(frozenCap) / float64(cm.totalConnected)
	}
	now := cm.clock.Now()
	cm.updateTotalCapacity(now, false)
	cm.logTotalCap -= capacityDropFactor * ratio
	if cm.logTotalCap < cm.minLogTotalCap {
		cm.logTotalCap = cm.minLogTotalCap
	}
	cm.updateTotalCapacity(now, true)
}
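
// Drop-size sketch (hypothetical numbers): if a frozen client accounts for half
// of totalConnected, ratio = 0.5 and logTotalCap drops by 0.1*0.5 = 0.05, i.e.
// the total capacity is multiplied by e^-0.05 (roughly 0.95), bounded below by
// the configured minimum.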

// updateTotalCapacity updates the total capacity factor. The capacity factor allows
// the total capacity of the system to go over the allowed total recharge value
// if clients go to frozen state sufficiently rarely.
// The capacity factor is dropped instantly by a small amount if a client is frozen.
// It is raised slowly (with a large time constant) if the total connected capacity
// is close to the total allowed amount and no clients are frozen.
func (cm *ClientManager) updateTotalCapacity(now mclock.AbsTime, refresh bool) {
	dt := now - cm.capLastUpdate
	cm.capLastUpdate = now

	if cm.logTotalCap < cm.logTotalCapRaiseLimit {
		cm.logTotalCap += capacityRaiseTC * float64(dt)
		if cm.logTotalCap > cm.logTotalCapRaiseLimit {
			cm.logTotalCap = cm.logTotalCapRaiseLimit
		}
	}
	if cm.logTotalCap > cm.maxLogTotalCap {
		cm.logTotalCap = cm.maxLogTotalCap
	}
	if refresh {
		cm.refreshCapacity()
	}
}

// refreshCapacity recalculates the total capacity value and sends an update to the subscription
// channel if the relative change of the value since the last update is more than 0.1 percent
func (cm *ClientManager) refreshCapacity() {
	totalCapacity := math.Exp(cm.logTotalCap)
	if totalCapacity >= cm.totalCapacity*0.999 && totalCapacity <= cm.totalCapacity*1.001 {
		return
	}
	cm.totalCapacity = totalCapacity
	if cm.totalCapacityCh != nil {
		select {
		case cm.totalCapacityCh <- uint64(cm.totalCapacity):
		default:
		}
	}
}

// SubscribeTotalCapacity returns all future updates to the total capacity value
// through a channel and also returns the current value
func (cm *ClientManager) SubscribeTotalCapacity(ch chan uint64) uint64 {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	cm.totalCapacityCh = ch
	return uint64(cm.totalCapacity)
}
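
// A minimal subscription sketch (the channel size is an arbitrary example):
// since refreshCapacity uses a non-blocking send, a buffered channel reduces
// the chance of missed updates.
//
//	ch := make(chan uint64, 16)
//	current := cm.SubscribeTotalCapacity(ch)
//	go func() {
//		for c := range ch {
//			// react to the new total capacity c
//			_ = c
//		}
//	}()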

// PieceWiseLinear is used to describe recharge curves
type PieceWiseLinear []struct{ X, Y uint64 }

// ValueAt returns the curve's value at a given point
func (pwl PieceWiseLinear) ValueAt(x uint64) float64 {
	l := 0
	h := len(pwl)
	if h == 0 {
		return 0
	}
	for h != l {
		m := (l + h) / 2
		if x > pwl[m].X {
			l = m + 1
		} else {
			h = m
		}
	}
	if l == 0 {
		return float64(pwl[0].Y)
	}
	l--
	if h == len(pwl) {
		return float64(pwl[l].Y)
	}
	dx := pwl[h].X - pwl[l].X
	if dx < 1 {
		return float64(pwl[l].Y)
	}
	return float64(pwl[l].Y) + float64(pwl[h].Y-pwl[l].Y)*float64(x-pwl[l].X)/float64(dx)
}
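
// Interpolation examples (hypothetical curve):
//
//	pwl := PieceWiseLinear{{0, 0}, {100, 1000}, {200, 1000}}
//	pwl.ValueAt(25)  // 250: interpolated between (0,0) and (100,1000)
//	pwl.ValueAt(150) // 1000: interpolated on the flat segment
//	pwl.ValueAt(300) // 1000: clamped to the last point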

// Valid returns true if the X coordinates of the curve points are monotonically non-decreasing
func (pwl PieceWiseLinear) Valid() bool {
	var lastX uint64
	for _, i := range pwl {
		if i.X < lastX {
			return false
		}
		lastX = i.X
	}
	return true
}
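
// For instance (hypothetical curves), {{0, 0}, {100, 50}, {100, 80}} is valid
// because equal X coordinates are allowed, while {{0, 0}, {100, 50}, {90, 80}}
// is not because X decreases.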