github.com/core-coin/go-core/v2@v2.1.9/les/flowcontrol/manager.go

// Copyright 2016 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

package flowcontrol

import (
	"fmt"
	"math"
	"sync"
	"time"

	"github.com/core-coin/go-core/v2/common/mclock"
	"github.com/core-coin/go-core/v2/common/prque"
)

// cmNodeFields are the ClientNode fields used by the client manager.
// Note: these fields are protected by the client manager's mutex.
type cmNodeFields struct {
	corrBufValue   int64 // buffer value adjusted with the extra recharge amount
	rcLastIntValue int64 // past recharge integrator value when corrBufValue was last updated
	rcFullIntValue int64 // future recharge integrator value when corrBufValue will reach its maximum
	queueIndex     int   // position in the recharge queue (-1 if not queued)
}

// FixedPointMultiplier is applied to the recharge integrator and the recharge curve.
//
// Note: fixed point arithmetic is required for the integrator because it is a
// constantly increasing value that can wrap around the int64 limits (a behavior
// that is also supported by the priority queue). A floating point value would
// gradually lose precision in this application.
// The recharge curve and all recharge values are encoded as fixed point because
// sumRecharge is frequently updated by adding or subtracting individual recharge
// values and perfect precision is required.
const FixedPointMultiplier = 1000000

var (
	capacityDropFactor          = 0.1
	capacityRaiseTC             = 1 / (3 * float64(time.Hour)) // time constant for raising the capacity factor
	capacityRaiseThresholdRatio = 1.125                        // total/connected capacity ratio threshold for raising the capacity factor
)

// ClientManager controls the capacity assigned to the clients of a server.
// Since ServerParams guarantee a safe lower estimate for processable requests
// even when all clients are active, ClientManager calculates a corrected
// buffer value and usually allows a higher remaining buffer value to be
// returned with each reply.
type ClientManager struct {
	clock     mclock.Clock
	lock      sync.Mutex
	enabledCh chan struct{}
	stop      chan chan struct{}

	curve                                      PieceWiseLinear
	sumRecharge, totalRecharge, totalConnected uint64
	logTotalCap, totalCapacity                 float64
	logTotalCapRaiseLimit                      float64
	minLogTotalCap, maxLogTotalCap             float64
	capacityRaiseThreshold                     uint64
	capLastUpdate                              mclock.AbsTime
	totalCapacityCh                            chan uint64

	// recharge integrator is increasing in each moment with a rate of
	// (totalRecharge / sumRecharge)*FixedPointMultiplier or 0 if sumRecharge == 0
	rcLastUpdate   mclock.AbsTime // last time the recharge integrator was updated
	rcLastIntValue int64          // last updated value of the recharge integrator
	// recharge queue is a priority queue with the currently recharging client nodes
	// as elements. The priority value is rcFullIntValue, which makes it possible to
	// quickly determine which client will finish recharging first.
	rcQueue *prque.Prque
}
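
// exampleIntegratorRate is a hypothetical helper (not part of the flow control
// API) that only restates the rate expression given in the ClientManager struct
// comment above: while at least one client is recharging, the integrator advances
// in proportion to totalRecharge / sumRecharge (scaled by FixedPointMultiplier);
// when sumRecharge is zero no client is recharging and the integrator is paused.
func exampleIntegratorRate(totalRecharge, sumRecharge uint64) float64 {
	if sumRecharge == 0 {
		// no client is currently recharging, so the integrator does not advance
		return 0
	}
	return float64(totalRecharge) / float64(sumRecharge) * FixedPointMultiplier
}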

// NewClientManager returns a new client manager.
// The client manager enhances flow control performance by allowing client buffers
// to recharge quicker than the minimum guaranteed recharge rate if possible.
// The sum of all minimum recharge rates (sumRecharge) is updated each time
// a client starts or finishes buffer recharging. Then an adjusted total
// recharge rate is calculated using a piecewise linear recharge curve:
//
//	totalRecharge = curve(sumRecharge)
//	(totalRecharge >= sumRecharge is enforced)
//
// Then the "bonus" buffer recharge is distributed between currently recharging
// clients proportionally to their minimum recharge rates.
//
// Note: total recharge is proportional to the average number of parallel running
// serving threads. A recharge value of 1000000 corresponds to one thread on average.
// The maximum number of allowed serving threads should always be considerably
// higher than the targeted average number.
//
// Note 2: although it is possible to specify a curve that allows the total target
// recharge already at zero sumRecharge, it makes sense to add a linear ramp
// starting from zero so that a single low-priority client cannot use up the
// entire server capacity, ensuring quick availability for others at any moment.
func NewClientManager(curve PieceWiseLinear, clock mclock.Clock) *ClientManager {
	cm := &ClientManager{
		clock:         clock,
		rcQueue:       prque.New(func(a interface{}, i int) { a.(*ClientNode).queueIndex = i }),
		capLastUpdate: clock.Now(),
		stop:          make(chan chan struct{}),
	}
	if curve != nil {
		cm.SetRechargeCurve(curve)
	}
	go func() {
		// regularly recalculate and update total capacity
		for {
			select {
			case <-time.After(time.Minute):
				cm.lock.Lock()
				cm.updateTotalCapacity(cm.clock.Now(), true)
				cm.lock.Unlock()
			case stop := <-cm.stop:
				close(stop)
				return
			}
		}
	}()
	return cm
}

// Stop stops the client manager
func (cm *ClientManager) Stop() {
	stop := make(chan struct{})
	cm.stop <- stop
	<-stop
}

// SetRechargeCurve updates the recharge curve
func (cm *ClientManager) SetRechargeCurve(curve PieceWiseLinear) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	now := cm.clock.Now()
	cm.updateRecharge(now)
	cm.curve = curve
	if len(curve) > 0 {
		cm.totalRecharge = curve[len(curve)-1].Y
	} else {
		cm.totalRecharge = 0
	}
}

// SetCapacityLimits sets the lower and upper limits of the total capacity and a
// threshold value used for raising capFactor. If either the difference between
// the total allowed and the total connected capacity is less than this threshold
// or their ratio is less than capacityRaiseThresholdRatio, then capFactor is
// allowed to rise slowly.
func (cm *ClientManager) SetCapacityLimits(min, max, raiseThreshold uint64) {
	if min < 1 {
		min = 1
	}
	cm.minLogTotalCap = math.Log(float64(min))
	if max < 1 {
		max = 1
	}
	cm.maxLogTotalCap = math.Log(float64(max))
	cm.logTotalCap = cm.maxLogTotalCap
	cm.capacityRaiseThreshold = raiseThreshold
	cm.refreshCapacity()
}

// connect should be called when a client is connected, before passing it to any
// other ClientManager function
func (cm *ClientManager) connect(node *ClientNode) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	now := cm.clock.Now()
	cm.updateRecharge(now)
	node.corrBufValue = int64(node.params.BufLimit)
	node.rcLastIntValue = cm.rcLastIntValue
	node.queueIndex = -1
	cm.updateTotalCapacity(now, true)
	cm.totalConnected += node.params.MinRecharge
	cm.updateRaiseLimit()
}

// disconnect should be called when a client is disconnected
func (cm *ClientManager) disconnect(node *ClientNode) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	now := cm.clock.Now()
	cm.updateRecharge(now)
	cm.updateTotalCapacity(now, true)
	cm.totalConnected -= node.params.MinRecharge
	cm.updateRaiseLimit()
}
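
// exampleSetup is a hypothetical sketch (the function name, curve points and
// capacity limits are illustrative assumptions, not part of the original code)
// showing how a server might wire up a ClientManager: a recharge curve with a
// linear ramp from zero up to a plateau, as recommended in the NewClientManager
// documentation, followed by capacity limits. The returned manager runs a
// background updater and should eventually be released with Stop.
func exampleSetup(clock mclock.Clock) *ClientManager {
	curve := PieceWiseLinear{
		{X: 0, Y: 0},                                               // ramp starts at zero so a single low-priority client cannot use up all capacity
		{X: 2 * FixedPointMultiplier, Y: 4 * FixedPointMultiplier}, // plateau: total recharge target of ~4 average serving threads
	}
	cm := NewClientManager(curve, clock)
	// illustrative limits: min capacity 1, max capacity 1000, raise threshold 100
	cm.SetCapacityLimits(1, 1000, 100)
	return cm
}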

// accepted is called when a request with a given maximum cost is accepted.
// It returns a priority indicator for the request which is used to determine placement
// in the serving queue. Older requests have higher priority by default. If the client
// is almost out of buffer, request priority is reduced.
func (cm *ClientManager) accepted(node *ClientNode, maxCost uint64, now mclock.AbsTime) (priority int64) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	cm.updateNodeRc(node, -int64(maxCost), &node.params, now)
	rcTime := (node.params.BufLimit - uint64(node.corrBufValue)) * FixedPointMultiplier / node.params.MinRecharge
	return -int64(now) - int64(rcTime)
}

// processed updates the client buffer according to actual request cost after
// serving has been finished.
//
// Note: processed should always be called for all accepted requests
func (cm *ClientManager) processed(node *ClientNode, maxCost, realCost uint64, now mclock.AbsTime) {
	if realCost > maxCost {
		realCost = maxCost
	}
	cm.updateBuffer(node, int64(maxCost-realCost), now)
}

// updateBuffer recalculates the corrected buffer value, adds the given value to it
// and updates the node's actual buffer value if possible
func (cm *ClientManager) updateBuffer(node *ClientNode, add int64, now mclock.AbsTime) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	cm.updateNodeRc(node, add, &node.params, now)
	if node.corrBufValue > node.bufValue {
		if node.log != nil {
			node.log.add(now, fmt.Sprintf("corrected bv=%d oldBv=%d", node.corrBufValue, node.bufValue))
		}
		node.bufValue = node.corrBufValue
	}
}

// updateParams updates the flow control parameters of a client node
func (cm *ClientManager) updateParams(node *ClientNode, params ServerParams, now mclock.AbsTime) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	cm.updateRecharge(now)
	cm.updateTotalCapacity(now, true)
	cm.totalConnected += params.MinRecharge - node.params.MinRecharge
	cm.updateRaiseLimit()
	cm.updateNodeRc(node, 0, &params, now)
}

// updateRaiseLimit recalculates the limiting value until which logTotalCap
// can be raised when no client freeze events occur
func (cm *ClientManager) updateRaiseLimit() {
	if cm.capacityRaiseThreshold == 0 {
		cm.logTotalCapRaiseLimit = 0
		return
	}
	limit := float64(cm.totalConnected + cm.capacityRaiseThreshold)
	limit2 := float64(cm.totalConnected) * capacityRaiseThresholdRatio
	if limit2 > limit {
		limit = limit2
	}
	if limit < 1 {
		limit = 1
	}
	cm.logTotalCapRaiseLimit = math.Log(limit)
}
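
// examplePriority is a hypothetical helper (not part of the original API) that
// restates the priority formula used by accepted above: rcTime is roughly the
// time needed for the corrected buffer to recharge back to its limit at the
// minimum rate, and the priority is its negated completion time. Older requests
// (smaller now) therefore get higher priority, and the further the corrected
// buffer is from its limit, the lower the priority. minRecharge is assumed to be
// non-zero, as it is for real ServerParams.
func examplePriority(bufLimit, corrBufValue, minRecharge uint64, now mclock.AbsTime) int64 {
	rcTime := (bufLimit - corrBufValue) * FixedPointMultiplier / minRecharge
	return -int64(now) - int64(rcTime)
}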

// updateRecharge updates the recharge integrator and checks the recharge queue
// for nodes with recently filled buffers
func (cm *ClientManager) updateRecharge(now mclock.AbsTime) {
	lastUpdate := cm.rcLastUpdate
	cm.rcLastUpdate = now
	// updating is done in multiple steps if node buffers are filled and sumRecharge
	// is decreased before the given target time
	for cm.sumRecharge > 0 {
		sumRecharge := cm.sumRecharge
		if sumRecharge > cm.totalRecharge {
			sumRecharge = cm.totalRecharge
		}
		bonusRatio := float64(1)
		v := cm.curve.ValueAt(sumRecharge)
		s := float64(sumRecharge)
		if v > s && s > 0 {
			bonusRatio = v / s
		}
		dt := now - lastUpdate
		// fetch the client that finishes first
		rcqNode := cm.rcQueue.PopItem().(*ClientNode) // if sumRecharge > 0 then the queue cannot be empty
		// check whether it has already finished
		dtNext := mclock.AbsTime(float64(rcqNode.rcFullIntValue-cm.rcLastIntValue) / bonusRatio)
		if dt < dtNext {
			// not finished yet, put it back, update integrator according
			// to current bonusRatio and return
			cm.rcQueue.Push(rcqNode, -rcqNode.rcFullIntValue)
			cm.rcLastIntValue += int64(bonusRatio * float64(dt))
			return
		}
		lastUpdate += dtNext
		// finished recharging, update corrBufValue and sumRecharge if necessary and do next step
		if rcqNode.corrBufValue < int64(rcqNode.params.BufLimit) {
			rcqNode.corrBufValue = int64(rcqNode.params.BufLimit)
			cm.sumRecharge -= rcqNode.params.MinRecharge
		}
		cm.rcLastIntValue = rcqNode.rcFullIntValue
	}
}

// updateNodeRc updates a node's corrBufValue and adds an external correction value.
// It also adds or removes the rcQueue entry and updates ServerParams and sumRecharge if necessary.
func (cm *ClientManager) updateNodeRc(node *ClientNode, bvc int64, params *ServerParams, now mclock.AbsTime) {
	cm.updateRecharge(now)
	wasFull := true
	if node.corrBufValue != int64(node.params.BufLimit) {
		wasFull = false
		node.corrBufValue += (cm.rcLastIntValue - node.rcLastIntValue) * int64(node.params.MinRecharge) / FixedPointMultiplier
		if node.corrBufValue > int64(node.params.BufLimit) {
			node.corrBufValue = int64(node.params.BufLimit)
		}
		node.rcLastIntValue = cm.rcLastIntValue
	}
	node.corrBufValue += bvc
	diff := int64(params.BufLimit - node.params.BufLimit)
	if diff > 0 {
		node.corrBufValue += diff
	}
	isFull := false
	if node.corrBufValue >= int64(params.BufLimit) {
		node.corrBufValue = int64(params.BufLimit)
		isFull = true
	}
	if !wasFull {
		cm.sumRecharge -= node.params.MinRecharge
	}
	if params != &node.params {
		node.params = *params
	}
	if !isFull {
		cm.sumRecharge += node.params.MinRecharge
		if node.queueIndex != -1 {
			cm.rcQueue.Remove(node.queueIndex)
		}
		node.rcLastIntValue = cm.rcLastIntValue
		node.rcFullIntValue = cm.rcLastIntValue + (int64(node.params.BufLimit)-node.corrBufValue)*FixedPointMultiplier/int64(node.params.MinRecharge)
		cm.rcQueue.Push(node, -node.rcFullIntValue)
	}
}

// reduceTotalCapacity reduces the total capacity allowance in case of a client freeze event
func (cm *ClientManager) reduceTotalCapacity(frozenCap uint64) {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	ratio := float64(1)
	if frozenCap < cm.totalConnected {
		ratio = float64(frozenCap) / float64(cm.totalConnected)
	}
	now := cm.clock.Now()
	cm.updateTotalCapacity(now, false)
	cm.logTotalCap -= capacityDropFactor * ratio
	if cm.logTotalCap < cm.minLogTotalCap {
		cm.logTotalCap = cm.minLogTotalCap
	}
	cm.updateTotalCapacity(now, true)
}
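
// exampleFullIntValue is a hypothetical helper (not part of the original API)
// mirroring the expression used in updateNodeRc when a node is (re)inserted into
// rcQueue: the integrator value at which a recharging node's corrected buffer is
// expected to be full again. The queue is keyed on the negation of this value, so
// the node that finishes recharging first is popped first in updateRecharge.
// params.MinRecharge is assumed to be non-zero.
func exampleFullIntValue(rcLastIntValue, corrBufValue int64, params ServerParams) int64 {
	return rcLastIntValue + (int64(params.BufLimit)-corrBufValue)*FixedPointMultiplier/int64(params.MinRecharge)
}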

// updateTotalCapacity updates the total capacity factor. The capacity factor allows
// the total capacity of the system to go over the allowed total recharge value
// if clients enter the frozen state sufficiently rarely.
// The capacity factor is dropped instantly by a small amount if a client is frozen.
// It is raised slowly (with a large time constant) if the total connected capacity
// is close to the total allowed amount and no clients are frozen.
func (cm *ClientManager) updateTotalCapacity(now mclock.AbsTime, refresh bool) {
	dt := now - cm.capLastUpdate
	cm.capLastUpdate = now

	if cm.logTotalCap < cm.logTotalCapRaiseLimit {
		cm.logTotalCap += capacityRaiseTC * float64(dt)
		if cm.logTotalCap > cm.logTotalCapRaiseLimit {
			cm.logTotalCap = cm.logTotalCapRaiseLimit
		}
	}
	if cm.logTotalCap > cm.maxLogTotalCap {
		cm.logTotalCap = cm.maxLogTotalCap
	}
	if refresh {
		cm.refreshCapacity()
	}
}

// refreshCapacity recalculates the total capacity value and sends an update to the subscription
// channel if the relative change of the value since the last update is more than 0.1 percent
func (cm *ClientManager) refreshCapacity() {
	totalCapacity := math.Exp(cm.logTotalCap)
	if totalCapacity >= cm.totalCapacity*0.999 && totalCapacity <= cm.totalCapacity*1.001 {
		return
	}
	cm.totalCapacity = totalCapacity
	if cm.totalCapacityCh != nil {
		select {
		case cm.totalCapacityCh <- uint64(cm.totalCapacity):
		default:
		}
	}
}

// SubscribeTotalCapacity returns all future updates to the total capacity value
// through a channel and also returns the current value
func (cm *ClientManager) SubscribeTotalCapacity(ch chan uint64) uint64 {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	cm.totalCapacityCh = ch
	return uint64(cm.totalCapacity)
}

// PieceWiseLinear is used to describe recharge curves
type PieceWiseLinear []struct{ X, Y uint64 }

// ValueAt returns the curve's value at a given point
func (pwl PieceWiseLinear) ValueAt(x uint64) float64 {
	l := 0
	h := len(pwl)
	if h == 0 {
		return 0
	}
	for h != l {
		m := (l + h) / 2
		if x > pwl[m].X {
			l = m + 1
		} else {
			h = m
		}
	}
	if l == 0 {
		return float64(pwl[0].Y)
	}
	l--
	if h == len(pwl) {
		return float64(pwl[l].Y)
	}
	dx := pwl[h].X - pwl[l].X
	if dx < 1 {
		return float64(pwl[l].Y)
	}
	return float64(pwl[l].Y) + float64(pwl[h].Y-pwl[l].Y)*float64(x-pwl[l].X)/float64(dx)
}

// Valid returns true if the X coordinates of the curve points are monotonically non-decreasing
func (pwl PieceWiseLinear) Valid() bool {
	var lastX uint64
	for _, i := range pwl {
		if i.X < lastX {
			return false
		}
		lastX = i.X
	}
	return true
}
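
// examplePieceWiseLinear is an illustrative sketch (not part of the original
// file): with the two points below the curve is a single linear segment, so
// ValueAt(FixedPointMultiplier/2) interpolates to exactly halfway between the
// two Y values, and any query beyond the last point returns the last Y value.
func examplePieceWiseLinear() float64 {
	curve := PieceWiseLinear{{X: 0, Y: 0}, {X: FixedPointMultiplier, Y: 2 * FixedPointMultiplier}}
	if !curve.Valid() {
		return 0
	}
	return curve.ValueAt(FixedPointMultiplier / 2) // == 1000000, halfway along the segment
}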