github.com/minh-tin/go-ethereum@v1.9.7/les/costtracker.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"encoding/binary"
	"math"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/les/flowcontrol"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
)

const makeCostStats = false // make request cost statistics during operation

var (
	// average request cost estimates based on serving time
	reqAvgTimeCost = requestCostTable{
		GetBlockHeadersMsg:     {150000, 30000},
		GetBlockBodiesMsg:      {0, 700000},
		GetReceiptsMsg:         {0, 1000000},
		GetCodeMsg:             {0, 450000},
		GetProofsV2Msg:         {0, 600000},
		GetHelperTrieProofsMsg: {0, 1000000},
		SendTxV2Msg:            {0, 450000},
		GetTxStatusMsg:         {0, 250000},
	}
	// maximum incoming message size estimates
	reqMaxInSize = requestCostTable{
		GetBlockHeadersMsg:     {40, 0},
		GetBlockBodiesMsg:      {0, 40},
		GetReceiptsMsg:         {0, 40},
		GetCodeMsg:             {0, 80},
		GetProofsV2Msg:         {0, 80},
		GetHelperTrieProofsMsg: {0, 20},
		SendTxV2Msg:            {0, 16500},
		GetTxStatusMsg:         {0, 50},
	}
	// maximum outgoing message size estimates
	reqMaxOutSize = requestCostTable{
		GetBlockHeadersMsg:     {0, 556},
		GetBlockBodiesMsg:      {0, 100000},
		GetReceiptsMsg:         {0, 200000},
		GetCodeMsg:             {0, 50000},
		GetProofsV2Msg:         {0, 4000},
		GetHelperTrieProofsMsg: {0, 4000},
		SendTxV2Msg:            {0, 100},
		GetTxStatusMsg:         {0, 100},
	}
	// request amounts that have to fit into the minimum buffer size minBufferMultiplier times
	minBufferReqAmount = map[uint64]uint64{
		GetBlockHeadersMsg:     192,
		GetBlockBodiesMsg:      1,
		GetReceiptsMsg:         1,
		GetCodeMsg:             1,
		GetProofsV2Msg:         1,
		GetHelperTrieProofsMsg: 16,
		SendTxV2Msg:            8,
		GetTxStatusMsg:         64,
	}
	minBufferMultiplier = 3
)

const (
	maxCostFactor    = 2    // ratio of maximum and average cost estimates
	bufLimitRatio    = 6000 // fixed bufLimit/MRR ratio
	gfUsageThreshold = 0.5
	gfUsageTC        = time.Second
	gfRaiseTC        = time.Second * 200
	gfDropTC         = time.Second * 50
	gfDbKey          = "_globalCostFactorV6"
)
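// Illustrative sketch, not part of the original file: the tables above are
// linear cost models of the form cost = baseCost + amount*reqCost. The
// function below (its name and the chosen request amount are assumptions made
// purely for illustration) shows how a concrete estimate falls out of
// reqAvgTimeCost for a GetBlockHeaders request asking for 192 headers:
// 150000 + 192*30000 = 5910000 estimated nanoseconds of serving time.
func exampleAvgTimeCost() uint64 {
	costs := reqAvgTimeCost[GetBlockHeadersMsg]
	return costs.baseCost + 192*costs.reqCost
}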
// costTracker is responsible for calculating costs and cost estimates on the
// server side. It continuously updates the global cost factor, which is defined
// as the number of cost units per nanosecond of serving time in a single thread.
// It is based on statistics collected while serving requests in high-load periods
// and practically acts as a one-dimensional request price scaling factor over the
// pre-defined cost estimate table.
//
// The global factor is maintained dynamically on the server side because the
// estimated time cost of a request is fixed (hardcoded), while the hardware
// running the server varies widely. The serving time of the same request can
// therefore differ greatly between machines, and even on the same machine it
// varies with the current request pressure.
//
// In order to limit resources more effectively, we apply the global factor to the
// serving time so that the result stays as close as possible to the estimated time
// cost regardless of whether the server is slow or fast. We also scale totalRecharge
// with the global factor so that a fast server can serve more requests than estimated
// while a slow server reduces the request pressure.
//
// Instead of scaling the cost values, the real value of cost units is changed by
// applying the factor to the serving times. This is more convenient because
// changes in the cost factor can be applied immediately without always notifying
// the clients about the changed cost tables.
type costTracker struct {
	db     ethdb.Database
	stopCh chan chan struct{}

	inSizeFactor  float64
	outSizeFactor float64
	factor        float64
	utilTarget    float64
	minBufLimit   uint64

	gfLock          sync.RWMutex
	reqInfoCh       chan reqInfo
	totalRechargeCh chan uint64

	stats map[uint64][]uint64 // Used for testing purposes.

	// TestHooks
	testing      bool            // Disable real cost evaluation for testing purposes.
	testCostList RequestCostList // Customized cost table for testing purposes.
}
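// Illustrative sketch, not part of the original file: the relationship the
// comment above describes. When bandwidth limits are not the bottleneck, the
// charged cost of a request is its measured serving time multiplied by the
// global factor, and the flow control recharge budget is the utilization
// target multiplied by the same factor. The function name and parameters are
// assumptions for illustration only.
func exampleFactorScaling(servingTime uint64, factor, utilTarget float64) (cost, totalRecharge uint64) {
	cost = uint64(float64(servingTime) * factor) // what realCost charges when size costs are smaller
	totalRecharge = uint64(utilTarget * factor)  // what totalRecharge returns
	return cost, totalRecharge
}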
// newCostTracker creates a cost tracker and loads the cost factor statistics from the database.
// It also returns the minimum capacity that can be assigned to any peer.
func newCostTracker(db ethdb.Database, config *eth.Config) (*costTracker, uint64) {
	utilTarget := float64(config.LightServ) * flowcontrol.FixedPointMultiplier / 100
	ct := &costTracker{
		db:         db,
		stopCh:     make(chan chan struct{}),
		reqInfoCh:  make(chan reqInfo, 100),
		utilTarget: utilTarget,
	}
	if config.LightIngress > 0 {
		ct.inSizeFactor = utilTarget / float64(config.LightIngress)
	}
	if config.LightEgress > 0 {
		ct.outSizeFactor = utilTarget / float64(config.LightEgress)
	}
	if makeCostStats {
		ct.stats = make(map[uint64][]uint64)
		for code := range reqAvgTimeCost {
			ct.stats[code] = make([]uint64, 10)
		}
	}
	ct.gfLoop()
	costList := ct.makeCostList(ct.globalFactor() * 1.25)
	for _, c := range costList {
		amount := minBufferReqAmount[c.MsgCode]
		cost := c.BaseCost + amount*c.ReqCost
		if cost > ct.minBufLimit {
			ct.minBufLimit = cost
		}
	}
	ct.minBufLimit *= uint64(minBufferMultiplier)
	return ct, (ct.minBufLimit-1)/bufLimitRatio + 1
}

// stop stops the cost tracker and saves the cost factor statistics to the database
func (ct *costTracker) stop() {
	stopCh := make(chan struct{})
	ct.stopCh <- stopCh
	<-stopCh
	if makeCostStats {
		ct.printStats()
	}
}

// makeCostList returns upper cost estimates based on the hardcoded cost estimate
// tables and the optionally specified incoming/outgoing bandwidth limits
func (ct *costTracker) makeCostList(globalFactor float64) RequestCostList {
	maxCost := func(avgTimeCost, inSize, outSize uint64) uint64 {
		cost := avgTimeCost * maxCostFactor
		inSizeCost := uint64(float64(inSize) * ct.inSizeFactor * globalFactor)
		if inSizeCost > cost {
			cost = inSizeCost
		}
		outSizeCost := uint64(float64(outSize) * ct.outSizeFactor * globalFactor)
		if outSizeCost > cost {
			cost = outSizeCost
		}
		return cost
	}
	var list RequestCostList
	for code, data := range reqAvgTimeCost {
		baseCost := maxCost(data.baseCost, reqMaxInSize[code].baseCost, reqMaxOutSize[code].baseCost)
		reqCost := maxCost(data.reqCost, reqMaxInSize[code].reqCost, reqMaxOutSize[code].reqCost)
		if ct.minBufLimit != 0 {
			// if minBufLimit is set then always enforce maximum request cost <= minBufLimit
			maxCost := baseCost + reqCost*minBufferReqAmount[code]
			if maxCost > ct.minBufLimit {
				mul := 0.999 * float64(ct.minBufLimit) / float64(maxCost)
				baseCost = uint64(float64(baseCost) * mul)
				reqCost = uint64(float64(reqCost) * mul)
			}
		}

		list = append(list, requestCostListItem{
			MsgCode:  code,
			BaseCost: baseCost,
			ReqCost:  reqCost,
		})
	}
	return list
}

// reqInfo contains the estimated time cost and the actual request serving time,
// which act as a feed source for updating the factor maintained by costTracker.
type reqInfo struct {
	// avgTimeCost is the estimated time cost corresponding to the reqAvgTimeCost table.
	avgTimeCost float64

	// servingTime is the CPU time corresponding to the actual processing of
	// the request.
	servingTime float64

	// msgCode indicates the type of request.
	msgCode uint64
}
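// Illustrative sketch, not part of the original file: how newCostTracker above
// derives the minimum assignable capacity. minBufLimit is the most expensive of
// the minBufferReqAmount request bundles under the given cost list, scaled by
// minBufferMultiplier, and the returned capacity is that buffer limit divided
// by the fixed bufLimit/MRR ratio, rounded up. The function name is an
// assumption made for illustration only.
func exampleMinCapacity(costList RequestCostList) uint64 {
	var minBufLimit uint64
	for _, c := range costList {
		if cost := c.BaseCost + minBufferReqAmount[c.MsgCode]*c.ReqCost; cost > minBufLimit {
			minBufLimit = cost
		}
	}
	minBufLimit *= uint64(minBufferMultiplier)
	return (minBufLimit-1)/bufLimitRatio + 1 // ceiling division by bufLimitRatio
}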
// gfLoop starts an event loop that updates the global cost factor, which is
// calculated as a weighted average of the average estimate / serving time ratio.
// The applied weight equals the serving time if gfUsage is over a threshold,
// zero otherwise. gfUsage is the recent average serving time per time unit in
// an exponential moving window. This ensures that statistics are collected only
// under high-load circumstances where the measured serving times are relevant.
// The total recharge parameter of the flow control system, which controls the
// total allowed serving time per second but is denominated in cost units, should
// also be scaled with the cost factor and is therefore updated by this loop as well.
func (ct *costTracker) gfLoop() {
	var (
		factor, totalRecharge        float64
		gfLog, recentTime, recentAvg float64

		lastUpdate, expUpdate = mclock.Now(), mclock.Now()
	)

	// Load historical cost factor statistics from the database.
	data, _ := ct.db.Get([]byte(gfDbKey))
	if len(data) == 8 {
		gfLog = math.Float64frombits(binary.BigEndian.Uint64(data[:]))
	}
	ct.factor = math.Exp(gfLog)
	factor, totalRecharge = ct.factor, ct.utilTarget*ct.factor

	// In order to collect factor statistics under high request pressure, we only
	// adjust the factor when the recent factor usage is above the threshold.
	threshold := gfUsageThreshold * float64(gfUsageTC) * ct.utilTarget / flowcontrol.FixedPointMultiplier

	go func() {
		saveCostFactor := func() {
			var data [8]byte
			binary.BigEndian.PutUint64(data[:], math.Float64bits(gfLog))
			ct.db.Put([]byte(gfDbKey), data[:])
			log.Debug("global cost factor saved", "value", factor)
		}
		saveTicker := time.NewTicker(time.Minute * 10)

		for {
			select {
			case r := <-ct.reqInfoCh:
				relCost := int64(factor * r.servingTime * 100 / r.avgTimeCost) // Convert the value to a percentage form

				// Record more metrics if we are debugging
				if metrics.EnabledExpensive {
					switch r.msgCode {
					case GetBlockHeadersMsg:
						relativeCostHeaderHistogram.Update(relCost)
					case GetBlockBodiesMsg:
						relativeCostBodyHistogram.Update(relCost)
					case GetReceiptsMsg:
						relativeCostReceiptHistogram.Update(relCost)
					case GetCodeMsg:
						relativeCostCodeHistogram.Update(relCost)
					case GetProofsV2Msg:
						relativeCostProofHistogram.Update(relCost)
					case GetHelperTrieProofsMsg:
						relativeCostHelperProofHistogram.Update(relCost)
					case SendTxV2Msg:
						relativeCostSendTxHistogram.Update(relCost)
					case GetTxStatusMsg:
						relativeCostTxStatusHistogram.Update(relCost)
					}
				}
				// SendTxV2 and GetTxStatus requests are two special cases: all other
				// requests only put pressure on the database and their delays are
				// relatively stable, while these two involve a txpool query, which is
				// usually unstable.
				//
				// TODO(rjl493456442) fix this.
				if r.msgCode == SendTxV2Msg || r.msgCode == GetTxStatusMsg {
					continue
				}
				requestServedMeter.Mark(int64(r.servingTime))
				requestServedTimer.Update(time.Duration(r.servingTime))
				requestEstimatedMeter.Mark(int64(r.avgTimeCost / factor))
				requestEstimatedTimer.Update(time.Duration(r.avgTimeCost / factor))
				relativeCostHistogram.Update(relCost)

				now := mclock.Now()
				dt := float64(now - expUpdate)
				expUpdate = now
				exp := math.Exp(-dt / float64(gfUsageTC))

				// calculate factor correction until now, based on previous values
				var gfCorr float64
				max := recentTime
				if recentAvg > max {
					max = recentAvg
				}
				// we apply continuous correction when MAX(recentTime, recentAvg) > threshold
				if max > threshold {
					// calculate correction time between last expUpdate and now
					if max*exp >= threshold {
						gfCorr = dt
					} else {
						gfCorr = math.Log(max/threshold) * float64(gfUsageTC)
					}
					// calculate log(factor) correction with the right direction and time constant
					if recentTime > recentAvg {
						// drop factor if actual serving times are larger than average estimates
						gfCorr /= -float64(gfDropTC)
					} else {
						// raise factor if actual serving times are smaller than average estimates
						gfCorr /= float64(gfRaiseTC)
					}
				}
				// update recent cost values with current request
				recentTime = recentTime*exp + r.servingTime
				recentAvg = recentAvg*exp + r.avgTimeCost/factor

				if gfCorr != 0 {
					// Apply the correction to factor
					gfLog += gfCorr
					factor = math.Exp(gfLog)
					// Notify outside modules of the new factor and totalRecharge.
					if time.Duration(now-lastUpdate) > time.Second {
						totalRecharge, lastUpdate = ct.utilTarget*factor, now
						ct.gfLock.Lock()
						ct.factor = factor
						ch := ct.totalRechargeCh
						ct.gfLock.Unlock()
						if ch != nil {
							select {
							case ct.totalRechargeCh <- uint64(totalRecharge):
							default:
							}
						}
						globalFactorGauge.Update(int64(1000 * factor))
						log.Debug("global cost factor updated", "factor", factor)
					}
				}
				recentServedGauge.Update(int64(recentTime))
				recentEstimatedGauge.Update(int64(recentAvg))

			case <-saveTicker.C:
				saveCostFactor()

			case stopCh := <-ct.stopCh:
				saveCostFactor()
				close(stopCh)
				return
			}
		}
	}()
}
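// Illustrative sketch, not part of the original file: the exponential moving
// window maintained inside gfLoop above. Both the measured serving time and the
// factor-scaled estimate are accumulated with a decay of exp(-dt/gfUsageTC), so
// recentTime and recentAvg approximate the recent serving time per gfUsageTC;
// comparing them decides whether the factor is raised or dropped. The function
// name and parameters are assumptions for illustration only.
func exampleWindowUpdate(recentTime, recentAvg, servingTime, scaledEstimate, dt float64) (float64, float64) {
	decay := math.Exp(-dt / float64(gfUsageTC))
	recentTime = recentTime*decay + servingTime
	recentAvg = recentAvg*decay + scaledEstimate
	return recentTime, recentAvg
}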
// globalFactor returns the current value of the global cost factor
func (ct *costTracker) globalFactor() float64 {
	ct.gfLock.RLock()
	defer ct.gfLock.RUnlock()

	return ct.factor
}

// totalRecharge returns the current total recharge parameter which is used by
// flowcontrol.ClientManager and is scaled by the global cost factor
func (ct *costTracker) totalRecharge() uint64 {
	ct.gfLock.RLock()
	defer ct.gfLock.RUnlock()

	return uint64(ct.factor * ct.utilTarget)
}

// subscribeTotalRecharge returns all future updates to the total recharge value
// through a channel and also returns the current value
func (ct *costTracker) subscribeTotalRecharge(ch chan uint64) uint64 {
	ct.gfLock.Lock()
	defer ct.gfLock.Unlock()

	ct.totalRechargeCh = ch
	return uint64(ct.factor * ct.utilTarget)
}
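// Illustrative sketch, not part of the original file: a typical consumer of
// subscribeTotalRecharge. The caller registers an update channel, seeds its
// state with the returned current value and then reacts to future updates
// pushed (non-blocking) by gfLoop. The tracker never closes the channel, so in
// this sketch the goroutine lives for the lifetime of the process. The function
// name is an assumption made for illustration only.
func exampleWatchTotalRecharge(ct *costTracker, apply func(uint64)) {
	updates := make(chan uint64, 10)
	apply(ct.subscribeTotalRecharge(updates)) // current value first
	go func() {
		for v := range updates {
			apply(v) // subsequent values published by gfLoop
		}
	}()
}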
// updateStats updates the global cost factor and (if enabled) the real cost vs.
// average estimate statistics
func (ct *costTracker) updateStats(code, amount, servingTime, realCost uint64) {
	avg := reqAvgTimeCost[code]
	avgTimeCost := avg.baseCost + amount*avg.reqCost
	select {
	case ct.reqInfoCh <- reqInfo{float64(avgTimeCost), float64(servingTime), code}:
	default:
	}
	if makeCostStats {
		realCost <<= 4
		l := 0
		for l < 9 && realCost > avgTimeCost {
			l++
			realCost >>= 1
		}
		atomic.AddUint64(&ct.stats[code][l], 1)
	}
}

// realCost calculates the final cost of a request based on actual serving time,
// incoming and outgoing message size
//
// Note: message size is only taken into account if bandwidth limitation is applied
// and the cost based on either message size is greater than the cost based on
// serving time. A maximum of the three costs is applied instead of their sum
// because the three limited resources (serving thread time and i/o bandwidth) can
// also be maxed out simultaneously.
func (ct *costTracker) realCost(servingTime uint64, inSize, outSize uint32) uint64 {
	cost := float64(servingTime)
	inSizeCost := float64(inSize) * ct.inSizeFactor
	if inSizeCost > cost {
		cost = inSizeCost
	}
	outSizeCost := float64(outSize) * ct.outSizeFactor
	if outSizeCost > cost {
		cost = outSizeCost
	}
	return uint64(cost * ct.globalFactor())
}

// printStats prints the distribution of real request cost relative to the average estimates
func (ct *costTracker) printStats() {
	if ct.stats == nil {
		return
	}
	for code, arr := range ct.stats {
		log.Info("Request cost statistics", "code", code, "1/16", arr[0], "1/8", arr[1], "1/4", arr[2], "1/2", arr[3], "1", arr[4], "2", arr[5], "4", arr[6], "8", arr[7], "16", arr[8], ">16", arr[9])
	}
}

type (
	// requestCostTable assigns a cost estimate function to each request type
	// which is a linear function of the requested amount
	// (cost = baseCost + reqCost * amount)
	requestCostTable map[uint64]*requestCosts
	requestCosts     struct {
		baseCost, reqCost uint64
	}

	// RequestCostList is a list representation of request costs which is used for
	// database storage and communication through the network
	RequestCostList     []requestCostListItem
	requestCostListItem struct {
		MsgCode, BaseCost, ReqCost uint64
	}
)

// getMaxCost calculates the estimated cost for a given request type and amount
func (table requestCostTable) getMaxCost(code, amount uint64) uint64 {
	costs := table[code]
	return costs.baseCost + amount*costs.reqCost
}

// decode converts a cost list to a cost table
func (list RequestCostList) decode(protocolLength uint64) requestCostTable {
	table := make(requestCostTable)
	for _, e := range list {
		if e.MsgCode < protocolLength {
			table[e.MsgCode] = &requestCosts{
				baseCost: e.BaseCost,
				reqCost:  e.ReqCost,
			}
		}
	}
	return table
}

// testCostList returns a dummy request cost list used by tests
func testCostList(testCost uint64) RequestCostList {
	cl := make(RequestCostList, len(reqAvgTimeCost))
	var max uint64
	for code := range reqAvgTimeCost {
		if code > max {
			max = code
		}
	}
	i := 0
	for code := uint64(0); code <= max; code++ {
		if _, ok := reqAvgTimeCost[code]; ok {
			cl[i].MsgCode = code
			cl[i].BaseCost = testCost
			cl[i].ReqCost = 0
			i++
		}
	}
	return cl
}
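// Illustrative sketch, not part of the original file: round-tripping a cost
// list through decode and querying it with getMaxCost. With testCostList every
// request type gets a flat base cost, so the result equals testCost regardless
// of the requested amount. The function name and the protocol length literal
// are assumptions made for illustration only.
func exampleDecodeCostList() uint64 {
	list := testCostList(100)
	// 64 is simply an upper bound on message codes, chosen for illustration.
	table := list.decode(64)
	return table.getMaxCost(GetBlockHeadersMsg, 192) // 100 + 192*0 = 100
}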