github.com/hardtosaygoodbye/go-ethereum@v1.10.16-0.20220122011429-97003b9e6c15/les/costtracker.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"encoding/binary"
	"math"
	"sync"
	"sync/atomic"
	"time"

	"github.com/hardtosaygoodbye/go-ethereum/common/mclock"
	"github.com/hardtosaygoodbye/go-ethereum/eth/ethconfig"
	"github.com/hardtosaygoodbye/go-ethereum/ethdb"
	"github.com/hardtosaygoodbye/go-ethereum/les/flowcontrol"
	"github.com/hardtosaygoodbye/go-ethereum/log"
	"github.com/hardtosaygoodbye/go-ethereum/metrics"
)

const makeCostStats = false // make request cost statistics during operation

var (
	// average request cost estimates based on serving time
	reqAvgTimeCost = requestCostTable{
		GetBlockHeadersMsg:     {150000, 30000},
		GetBlockBodiesMsg:      {0, 700000},
		GetReceiptsMsg:         {0, 1000000},
		GetCodeMsg:             {0, 450000},
		GetProofsV2Msg:         {0, 600000},
		GetHelperTrieProofsMsg: {0, 1000000},
		SendTxV2Msg:            {0, 450000},
		GetTxStatusMsg:         {0, 250000},
	}
	// maximum incoming message size estimates
	reqMaxInSize = requestCostTable{
		GetBlockHeadersMsg:     {40, 0},
		GetBlockBodiesMsg:      {0, 40},
		GetReceiptsMsg:         {0, 40},
		GetCodeMsg:             {0, 80},
		GetProofsV2Msg:         {0, 80},
		GetHelperTrieProofsMsg: {0, 20},
		SendTxV2Msg:            {0, 16500},
		GetTxStatusMsg:         {0, 50},
	}
	// maximum outgoing message size estimates
	reqMaxOutSize = requestCostTable{
		GetBlockHeadersMsg:     {0, 556},
		GetBlockBodiesMsg:      {0, 100000},
		GetReceiptsMsg:         {0, 200000},
		GetCodeMsg:             {0, 50000},
		GetProofsV2Msg:         {0, 4000},
		GetHelperTrieProofsMsg: {0, 4000},
		SendTxV2Msg:            {0, 100},
		GetTxStatusMsg:         {0, 100},
	}
	// request amounts that have to fit into the minimum buffer size minBufferMultiplier times
	minBufferReqAmount = map[uint64]uint64{
		GetBlockHeadersMsg:     192,
		GetBlockBodiesMsg:      1,
		GetReceiptsMsg:         1,
		GetCodeMsg:             1,
		GetProofsV2Msg:         1,
		GetHelperTrieProofsMsg: 16,
		SendTxV2Msg:            8,
		GetTxStatusMsg:         64,
	}
	minBufferMultiplier = 3
)

const (
	maxCostFactor    = 2    // ratio of maximum and average cost estimates
	bufLimitRatio    = 6000 // fixed bufLimit/MRR ratio
	gfUsageThreshold = 0.5
	gfUsageTC        = time.Second
	gfRaiseTC        = time.Second * 200
	gfDropTC         = time.Second * 50
	gfDbKey          = "_globalCostFactorV6"
)

// costTracker is responsible for calculating costs and cost estimates on the
// server side. It continuously updates the global cost factor which is defined
// as the number of cost units per nanosecond of serving time in a single thread.
// It is based on statistics collected while serving requests in high-load periods
// and practically acts as a one-dimensional request price scaling factor over the
// pre-defined cost estimate table.
//
// The global factor is maintained dynamically on the server side because the
// estimated time cost of each request is fixed (hardcoded), while the hardware
// running the server varies; the same request can therefore take very different
// amounts of time on different machines, and even on the same machine under
// different request pressure.
//
// To limit resources more effectively, the global factor is applied to the
// measured serving time so that the result stays as close as possible to the
// estimated time cost, regardless of how slow or fast the server is. totalRecharge
// is scaled with the same factor, so a fast server can serve more requests than
// the estimates suggest while a slow server reduces the request pressure it accepts.
//
// Instead of scaling the cost values, the real value of cost units is changed by
// applying the factor to the serving times. This is more convenient because the
// changes in the cost factor can be applied immediately without always notifying
// the clients about the changed cost tables.
type costTracker struct {
	db     ethdb.Database
	stopCh chan chan struct{}

	inSizeFactor  float64
	outSizeFactor float64
	factor        float64
	utilTarget    float64
	minBufLimit   uint64

	gfLock          sync.RWMutex
	reqInfoCh       chan reqInfo
	totalRechargeCh chan uint64

	stats map[uint64][]uint64 // Used for testing purposes.

	// TestHooks
	testing      bool            // Disable real cost evaluation for testing purposes.
	testCostList RequestCostList // Customized cost table for testing purposes.
}
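// exampleChargedCost is a purely illustrative sketch that is not part of the
// original code and is not used anywhere in this package: it spells out the
// relationship described above, namely that the charged cost of a request is
// its measured serving time (in nanoseconds) scaled by the current global cost
// factor. The function name and the sample numbers are hypothetical.
func exampleChargedCost(servingTimeNs uint64, globalFactor float64) uint64 {
	// With globalFactor = 2.0, a request served in 300000ns is charged
	// 600000 cost units, the nominal cost of 600000ns on a reference-speed machine.
	return uint64(float64(servingTimeNs) * globalFactor)
}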
// newCostTracker creates a cost tracker and loads the cost factor statistics from the database.
// It also returns the minimum capacity that can be assigned to any peer.
func newCostTracker(db ethdb.Database, config *ethconfig.Config) (*costTracker, uint64) {
	utilTarget := float64(config.LightServ) * flowcontrol.FixedPointMultiplier / 100
	ct := &costTracker{
		db:         db,
		stopCh:     make(chan chan struct{}),
		reqInfoCh:  make(chan reqInfo, 100),
		utilTarget: utilTarget,
	}
	if config.LightIngress > 0 {
		ct.inSizeFactor = utilTarget / float64(config.LightIngress)
	}
	if config.LightEgress > 0 {
		ct.outSizeFactor = utilTarget / float64(config.LightEgress)
	}
	if makeCostStats {
		ct.stats = make(map[uint64][]uint64)
		for code := range reqAvgTimeCost {
			ct.stats[code] = make([]uint64, 10)
		}
	}
	ct.gfLoop()
	costList := ct.makeCostList(ct.globalFactor() * 1.25)
	for _, c := range costList {
		amount := minBufferReqAmount[c.MsgCode]
		cost := c.BaseCost + amount*c.ReqCost
		if cost > ct.minBufLimit {
			ct.minBufLimit = cost
		}
	}
	ct.minBufLimit *= uint64(minBufferMultiplier)
	return ct, (ct.minBufLimit-1)/bufLimitRatio + 1
}

// stop stops the cost tracker and saves the cost factor statistics to the database
func (ct *costTracker) stop() {
	stopCh := make(chan struct{})
	ct.stopCh <- stopCh
	<-stopCh
	if makeCostStats {
		ct.printStats()
	}
}

// makeCostList returns upper cost estimates based on the hardcoded cost estimate
// tables and the optionally specified incoming/outgoing bandwidth limits
func (ct *costTracker) makeCostList(globalFactor float64) RequestCostList {
	maxCost := func(avgTimeCost, inSize, outSize uint64) uint64 {
		cost := avgTimeCost * maxCostFactor
		inSizeCost := uint64(float64(inSize) * ct.inSizeFactor * globalFactor)
		if inSizeCost > cost {
			cost = inSizeCost
		}
		outSizeCost := uint64(float64(outSize) * ct.outSizeFactor * globalFactor)
		if outSizeCost > cost {
			cost = outSizeCost
		}
		return cost
	}
	var list RequestCostList
	for code, data := range reqAvgTimeCost {
		baseCost := maxCost(data.baseCost, reqMaxInSize[code].baseCost, reqMaxOutSize[code].baseCost)
		reqCost := maxCost(data.reqCost, reqMaxInSize[code].reqCost, reqMaxOutSize[code].reqCost)
		if ct.minBufLimit != 0 {
			// if minBufLimit is set then always enforce maximum request cost <= minBufLimit
			maxCost := baseCost + reqCost*minBufferReqAmount[code]
			if maxCost > ct.minBufLimit {
				mul := 0.999 * float64(ct.minBufLimit) / float64(maxCost)
				baseCost = uint64(float64(baseCost) * mul)
				reqCost = uint64(float64(reqCost) * mul)
			}
		}

		list = append(list, requestCostListItem{
			MsgCode:  code,
			BaseCost: baseCost,
			ReqCost:  reqCost,
		})
	}
	return list
}

// reqInfo contains the estimated time cost and the actual request serving time,
// which act as a feed source for updating the factor maintained by costTracker.
type reqInfo struct {
	// avgTimeCost is the estimated time cost corresponding to maxCostTable.
	avgTimeCost float64

	// servingTime is the CPU time corresponding to the actual processing of
	// the request.
	servingTime float64

	// msgCode indicates the type of request.
	msgCode uint64
}
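// exampleMinCapacity is an illustrative sketch, not used by the package: it
// repeats the arithmetic newCostTracker performs at the end, deriving the
// minimum assignable peer capacity from the minimum buffer limit via the fixed
// bufLimit/MRR ratio. The input value below is hypothetical.
func exampleMinCapacity(minBufLimit uint64) uint64 {
	// e.g. a minBufLimit of 18,000,000 cost units maps to a minimum capacity
	// (minimum recharge rate) of (18000000-1)/6000 + 1 = 3000.
	return (minBufLimit-1)/bufLimitRatio + 1
}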
// gfLoop starts an event loop which updates the global cost factor, calculated
// as a weighted average of the average estimate / serving time ratio. The
// applied weight equals the serving time if gfUsage is over a threshold, zero
// otherwise. gfUsage is the recent average serving time per time unit in an
// exponential moving window. This ensures that statistics are only collected
// under high-load circumstances where the measured serving times are relevant.
// The total recharge parameter of the flow control system, which controls the
// total allowed serving time per second but is denominated in cost units, should
// also be scaled with the cost factor and is therefore updated by this loop as well.
func (ct *costTracker) gfLoop() {
	var (
		factor, totalRecharge        float64
		gfLog, recentTime, recentAvg float64

		lastUpdate, expUpdate = mclock.Now(), mclock.Now()
	)

	// Load historical cost factor statistics from the database.
	data, _ := ct.db.Get([]byte(gfDbKey))
	if len(data) == 8 {
		gfLog = math.Float64frombits(binary.BigEndian.Uint64(data[:]))
	}
	ct.factor = math.Exp(gfLog)
	factor, totalRecharge = ct.factor, ct.utilTarget*ct.factor

	// To make sure the factor statistics are only gathered under high request
	// pressure, the factor is only adjusted when the recent factor usage is
	// above this threshold.
	threshold := gfUsageThreshold * float64(gfUsageTC) * ct.utilTarget / flowcontrol.FixedPointMultiplier

	go func() {
		saveCostFactor := func() {
			var data [8]byte
			binary.BigEndian.PutUint64(data[:], math.Float64bits(gfLog))
			ct.db.Put([]byte(gfDbKey), data[:])
			log.Debug("global cost factor saved", "value", factor)
		}
		saveTicker := time.NewTicker(time.Minute * 10)
		defer saveTicker.Stop()

		for {
			select {
			case r := <-ct.reqInfoCh:
				relCost := int64(factor * r.servingTime * 100 / r.avgTimeCost) // Convert the value to a percentage form

				// Record more metrics if we are debugging
				if metrics.EnabledExpensive {
					switch r.msgCode {
					case GetBlockHeadersMsg:
						relativeCostHeaderHistogram.Update(relCost)
					case GetBlockBodiesMsg:
						relativeCostBodyHistogram.Update(relCost)
					case GetReceiptsMsg:
						relativeCostReceiptHistogram.Update(relCost)
					case GetCodeMsg:
						relativeCostCodeHistogram.Update(relCost)
					case GetProofsV2Msg:
						relativeCostProofHistogram.Update(relCost)
					case GetHelperTrieProofsMsg:
						relativeCostHelperProofHistogram.Update(relCost)
					case SendTxV2Msg:
						relativeCostSendTxHistogram.Update(relCost)
					case GetTxStatusMsg:
						relativeCostTxStatusHistogram.Update(relCost)
					}
				}
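				// A worked illustration with hypothetical numbers (not derived from the
				// code above): with factor = 2.0, a request whose estimated time cost is
				// 1,000,000 units and whose measured serving time is 400,000ns yields
				// relCost = 2.0 * 400000 * 100 / 1000000 = 80, i.e. the request turned
				// out roughly 20% cheaper than the scaled estimate.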
				// SendTxV2 and GetTxStatus requests are two special cases.
				// All other requests only put pressure on the database, so
				// their latency is relatively stable, while these two involve
				// a txpool query whose latency is usually unstable.
				//
				// TODO(rjl493456442) fixes this.
				if r.msgCode == SendTxV2Msg || r.msgCode == GetTxStatusMsg {
					continue
				}
				requestServedMeter.Mark(int64(r.servingTime))
				requestServedTimer.Update(time.Duration(r.servingTime))
				requestEstimatedMeter.Mark(int64(r.avgTimeCost / factor))
				requestEstimatedTimer.Update(time.Duration(r.avgTimeCost / factor))
				relativeCostHistogram.Update(relCost)

				now := mclock.Now()
				dt := float64(now - expUpdate)
				expUpdate = now
				exp := math.Exp(-dt / float64(gfUsageTC))

				// calculate factor correction until now, based on previous values
				var gfCorr float64
				max := recentTime
				if recentAvg > max {
					max = recentAvg
				}
				// we apply continuous correction when MAX(recentTime, recentAvg) > threshold
				if max > threshold {
					// calculate correction time between last expUpdate and now
					if max*exp >= threshold {
						gfCorr = dt
					} else {
						gfCorr = math.Log(max/threshold) * float64(gfUsageTC)
					}
					// calculate log(factor) correction with the right direction and time constant
					if recentTime > recentAvg {
						// drop factor if actual serving times are larger than average estimates
						gfCorr /= -float64(gfDropTC)
					} else {
						// raise factor if actual serving times are smaller than average estimates
						gfCorr /= float64(gfRaiseTC)
					}
				}
				// update recent cost values with current request
				recentTime = recentTime*exp + r.servingTime
				recentAvg = recentAvg*exp + r.avgTimeCost/factor

				if gfCorr != 0 {
					// Apply the correction to factor
					gfLog += gfCorr
					factor = math.Exp(gfLog)
					// Notify outside modules of the new factor and totalRecharge.
					if time.Duration(now-lastUpdate) > time.Second {
						totalRecharge, lastUpdate = ct.utilTarget*factor, now
						ct.gfLock.Lock()
						ct.factor = factor
						ch := ct.totalRechargeCh
						ct.gfLock.Unlock()
						if ch != nil {
							select {
							case ct.totalRechargeCh <- uint64(totalRecharge):
							default:
							}
						}
						globalFactorGauge.Update(int64(1000 * factor))
						log.Debug("global cost factor updated", "factor", factor)
					}
				}
				recentServedGauge.Update(int64(recentTime))
				recentEstimatedGauge.Update(int64(recentAvg))

			case <-saveTicker.C:
				saveCostFactor()

			case stopCh := <-ct.stopCh:
				saveCostFactor()
				close(stopCh)
				return
			}
		}
	}()
}

// globalFactor returns the current value of the global cost factor
func (ct *costTracker) globalFactor() float64 {
	ct.gfLock.RLock()
	defer ct.gfLock.RUnlock()

	return ct.factor
}

// totalRecharge returns the current total recharge parameter which is used by
// flowcontrol.ClientManager and is scaled by the global cost factor
func (ct *costTracker) totalRecharge() uint64 {
	ct.gfLock.RLock()
	defer ct.gfLock.RUnlock()

	return uint64(ct.factor * ct.utilTarget)
}

// subscribeTotalRecharge returns all future updates to the total recharge value
// through a channel and also returns the current value
func (ct *costTracker) subscribeTotalRecharge(ch chan uint64) uint64 {
	ct.gfLock.Lock()
	defer ct.gfLock.Unlock()

	ct.totalRechargeCh = ch
	return uint64(ct.factor * ct.utilTarget)
}

// updateStats updates the global cost factor and (if enabled) the real cost vs.
// average estimate statistics
func (ct *costTracker) updateStats(code, amount, servingTime, realCost uint64) {
	avg := reqAvgTimeCost[code]
	avgTimeCost := avg.baseCost + amount*avg.reqCost
	select {
	case ct.reqInfoCh <- reqInfo{float64(avgTimeCost), float64(servingTime), code}:
	default:
	}
	if makeCostStats {
		realCost <<= 4
		l := 0
		for l < 9 && realCost > avgTimeCost {
			l++
			realCost >>= 1
		}
		atomic.AddUint64(&ct.stats[code][l], 1)
	}
}

// realCost calculates the final cost of a request based on actual serving time,
// incoming and outgoing message size
//
// Note: message size is only taken into account if a bandwidth limitation is applied
// and the cost based on either message size is greater than the cost based on
// serving time. The maximum of the three costs is applied instead of their sum
// because the three limited resources (serving thread time, incoming and outgoing
// i/o bandwidth) can also be maxed out simultaneously.
func (ct *costTracker) realCost(servingTime uint64, inSize, outSize uint32) uint64 {
	cost := float64(servingTime)
	inSizeCost := float64(inSize) * ct.inSizeFactor
	if inSizeCost > cost {
		cost = inSizeCost
	}
	outSizeCost := float64(outSize) * ct.outSizeFactor
	if outSizeCost > cost {
		cost = outSizeCost
	}
	return uint64(cost * ct.globalFactor())
}

// printStats prints the distribution of real request cost relative to the average estimates
func (ct *costTracker) printStats() {
	if ct.stats == nil {
		return
	}
	for code, arr := range ct.stats {
		log.Info("Request cost statistics", "code", code, "1/16", arr[0], "1/8", arr[1], "1/4", arr[2], "1/2", arr[3], "1", arr[4], "2", arr[5], "4", arr[6], "8", arr[7], "16", arr[8], ">16", arr[9])
	}
}

type (
	// requestCostTable assigns a cost estimate function to each request type
	// which is a linear function of the requested amount
	// (cost = baseCost + reqCost * amount)
	requestCostTable map[uint64]*requestCosts
	requestCosts     struct {
		baseCost, reqCost uint64
	}

	// RequestCostList is a list representation of request costs which is used for
	// database storage and communication through the network
	RequestCostList     []requestCostListItem
	requestCostListItem struct {
		MsgCode, BaseCost, ReqCost uint64
	}
)

// getMaxCost calculates the estimated cost for a given request type and amount
func (table requestCostTable) getMaxCost(code, amount uint64) uint64 {
	costs := table[code]
	return costs.baseCost + amount*costs.reqCost
}

// decode converts a cost list to a cost table
func (list RequestCostList) decode(protocolLength uint64) requestCostTable {
	table := make(requestCostTable)
	for _, e := range list {
		if e.MsgCode < protocolLength {
			table[e.MsgCode] = &requestCosts{
				baseCost: e.BaseCost,
				reqCost:  e.ReqCost,
			}
		}
	}
	return table
}

// testCostList returns a dummy request cost list used by tests
func testCostList(testCost uint64) RequestCostList {
	cl := make(RequestCostList, len(reqAvgTimeCost))
	var max uint64
	for code := range reqAvgTimeCost {
		if code > max {
			max = code
		}
	}
	i := 0
	for code := uint64(0); code <= max; code++ {
		if _, ok := reqAvgTimeCost[code]; ok {
			cl[i].MsgCode = code
			cl[i].BaseCost = testCost
			cl[i].ReqCost = 0
			i++
		}
	}
	return cl
}
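// exampleDecodedCost is an illustrative sketch, not used by the package or its
// tests: it shows how a cost list announced over the network would be turned
// back into a requestCostTable and queried for the linear cost of a request
// (cost = baseCost + reqCost * amount). The message code is real, but the cost
// values, the protocol length literal and the requested amount are hypothetical.
func exampleDecodedCost() uint64 {
	list := RequestCostList{{MsgCode: GetBlockHeadersMsg, BaseCost: 300000, ReqCost: 60000}}
	// 64 is a hypothetical protocol length, chosen only so the entry is kept.
	table := list.decode(64)
	// For a request asking for 192 headers: 300000 + 192*60000 = 11,820,000 cost units.
	return table.getMaxCost(GetBlockHeadersMsg, 192)
}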