github.com/ylsGit/go-ethereum@v1.6.5/les/serverpool.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package les implements the Light Ethereum Subprotocol.
package les

import (
	"fmt"
	"io"
	"math"
	"math/rand"
	"net"
	"strconv"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/discv5"
	"github.com/ethereum/go-ethereum/rlp"
)

const (
	// After a connection has been ended or timed out, there is a waiting period
	// before it can be selected for connection again.
	// waiting period = base delay * (1 + random(1))
	// base delay = shortRetryDelay for the first shortRetryCnt times after a
	// successful connection, after that longRetryDelay is applied
	shortRetryCnt   = 5
	shortRetryDelay = time.Second * 5
	longRetryDelay  = time.Minute * 10
	// maxNewEntries is the maximum number of newly discovered (never connected) nodes.
	// If the limit is reached, the least recently discovered one is thrown out.
	maxNewEntries = 1000
	// maxKnownEntries is the maximum number of known (already connected) nodes.
	// If the limit is reached, the least recently connected one is thrown out.
	// (note that unlike new entries, known entries are persistent)
	maxKnownEntries = 1000
	// target for simultaneously connected servers
	targetServerCount = 5
	// target for servers selected from the known table
	// (we leave room for trying new ones if there are any)
	targetKnownSelect = 3
	// after dialTimeout, consider the server unavailable and adjust statistics
	dialTimeout = time.Second * 30
	// targetConnTime is the minimum expected connection duration before a server
	// drops a client without any specific reason
	targetConnTime = time.Minute * 10
	// new entry selection weight calculation based on most recent discovery time:
	// unity until discoverExpireStart, then exponential decay with discoverExpireConst
	discoverExpireStart = time.Minute * 20
	discoverExpireConst = time.Minute * 20
	// known entry selection weight is dropped by a factor of exp(-failDropLn) after
	// each unsuccessful connection (restored after a successful one)
	failDropLn = 0.1
	// known node connection success and quality statistics have a long term average
	// and a short term value which is adjusted exponentially with a factor of
	// pstatRecentAdjust with each dial/connection and also returned exponentially
	// to the average with the time constant pstatReturnToMeanTC
	pstatRecentAdjust   = 0.1
	pstatReturnToMeanTC = time.Hour
	// node address selection weight is dropped by a factor of exp(-addrFailDropLn) after
	// each unsuccessful connection (restored after a successful one)
	addrFailDropLn = math.Ln2
	// responseScoreTC and delayScoreTC are exponential decay time constants for
	// calculating selection chances from response times and block delay times
	responseScoreTC = time.Millisecond * 100
	delayScoreTC    = time.Second * 5
	timeoutPow      = 10
	// peerSelectMinWeight is added to calculated weights at request peer selection
	// to give poorly performing peers a little chance of coming back
	peerSelectMinWeight = 0.005
	// initStatsWeight is used to initialize previously unknown peers with good
	// statistics to give a chance to prove themselves
	initStatsWeight = 1
)
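
// exampleRetryDelay is a minimal sketch of the waiting period formula described
// above: with base delay d the result is uniformly distributed in [d, 2*d], i.e.
// roughly 5-10s during the short retry phase and 10-20min afterwards. The pool
// itself applies this formula in setRetryDial below; this helper is illustrative
// only and is not referenced anywhere else.
func exampleRetryDelay(shortRetriesLeft int) time.Duration {
	base := longRetryDelay
	if shortRetriesLeft > 0 {
		base = shortRetryDelay
	}
	// waiting period = base delay * (1 + random(1))
	return base + time.Duration(rand.Int63n(int64(base)+1))
}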

// serverPool implements a pool for storing and selecting newly discovered and already
// known light server nodes. It receives discovered nodes, stores statistics about
// known nodes and takes care of always having enough good quality servers connected.
type serverPool struct {
	db     ethdb.Database
	dbKey  []byte
	server *p2p.Server
	quit   chan struct{}
	wg     *sync.WaitGroup
	connWg sync.WaitGroup

	discSetPeriod chan time.Duration
	discNodes     chan *discv5.Node
	discLookups   chan bool

	entries              map[discover.NodeID]*poolEntry
	lock                 sync.Mutex
	timeout, enableRetry chan *poolEntry
	adjustStats          chan poolStatAdjust

	knownQueue, newQueue       poolEntryQueue
	knownSelect, newSelect     *weightedRandomSelect
	knownSelected, newSelected int
	fastDiscover               bool
}

// newServerPool creates a new serverPool instance
func newServerPool(db ethdb.Database, dbPrefix []byte, server *p2p.Server, topic discv5.Topic, quit chan struct{}, wg *sync.WaitGroup) *serverPool {
	pool := &serverPool{
		db:           db,
		dbKey:        append(dbPrefix, []byte(topic)...),
		server:       server,
		quit:         quit,
		wg:           wg,
		entries:      make(map[discover.NodeID]*poolEntry),
		timeout:      make(chan *poolEntry, 1),
		adjustStats:  make(chan poolStatAdjust, 100),
		enableRetry:  make(chan *poolEntry, 1),
		knownSelect:  newWeightedRandomSelect(),
		newSelect:    newWeightedRandomSelect(),
		fastDiscover: true,
	}
	pool.knownQueue = newPoolEntryQueue(maxKnownEntries, pool.removeEntry)
	pool.newQueue = newPoolEntryQueue(maxNewEntries, pool.removeEntry)
	wg.Add(1)
	pool.loadNodes()
	pool.checkDial()

	if pool.server.DiscV5 != nil {
		pool.discSetPeriod = make(chan time.Duration, 1)
		pool.discNodes = make(chan *discv5.Node, 100)
		pool.discLookups = make(chan bool, 100)
		go pool.server.DiscV5.SearchTopic(topic, pool.discSetPeriod, pool.discNodes, pool.discLookups)
	}

	go pool.eventLoop()
	return pool
}

// connect should be called upon any incoming connection. If the connection has been
// dialed by the server pool recently, the appropriate pool entry is returned.
// Otherwise, the connection should be rejected.
// Note that whenever a connection has been accepted and a pool entry has been returned,
// disconnect should also always be called.
func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
	pool.lock.Lock()
	defer pool.lock.Unlock()
	entry := pool.entries[p.ID()]
	if entry == nil {
		entry = pool.findOrNewNode(p.ID(), ip, port)
	}
	p.Log().Debug("Connecting to new peer", "state", entry.state)
	if entry.state == psConnected || entry.state == psRegistered {
		return nil
	}
	pool.connWg.Add(1)
	entry.peer = p
	entry.state = psConnected
	addr := &poolEntryAddress{
		ip:       ip,
		port:     port,
		lastSeen: mclock.Now(),
	}
	entry.lastConnected = addr
	entry.addr = make(map[string]*poolEntryAddress)
	entry.addr[addr.strKey()] = addr
	entry.addrSelect = *newWeightedRandomSelect()
	entry.addrSelect.update(addr)
	return entry
}

// registered should be called after a successful handshake
func (pool *serverPool) registered(entry *poolEntry) {
	log.Debug("Registered new entry", "enode", entry.id)
	pool.lock.Lock()
	defer pool.lock.Unlock()

	entry.state = psRegistered
	entry.regTime = mclock.Now()
	if !entry.known {
		pool.newQueue.remove(entry)
		entry.known = true
	}
	pool.knownQueue.setLatest(entry)
	entry.shortRetry = shortRetryCnt
}
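
// handleLesPeer is a minimal usage sketch of the connect/registered/disconnect
// contract described above, from the point of view of a hypothetical caller.
// It is not part of the pool and only illustrates the intended call order.
func handleLesPeer(pool *serverPool, p *peer, ip net.IP, port uint16) error {
	entry := pool.connect(p, ip, port)
	if entry == nil {
		// the pool has not dialed this node recently, so the connection should be rejected
		return fmt.Errorf("les: unsolicited connection from %x", p.ID())
	}
	// after a successful protocol handshake:
	pool.registered(entry)
	// ... serve the peer ...
	// disconnect must always be called once the connection ends
	pool.disconnect(entry)
	return nil
}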

// disconnect should be called when ending a connection. Service quality statistics
// can optionally be updated (they are not updated if no registration happened; in
// that case only connection statistics are updated, just like in the case of a timeout).
func (pool *serverPool) disconnect(entry *poolEntry) {
	log.Debug("Disconnected old entry", "enode", entry.id)
	pool.lock.Lock()
	defer pool.lock.Unlock()

	if entry.state == psRegistered {
		connTime := mclock.Now() - entry.regTime
		connAdjust := float64(connTime) / float64(targetConnTime)
		if connAdjust > 1 {
			connAdjust = 1
		}
		stopped := false
		select {
		case <-pool.quit:
			stopped = true
		default:
		}
		if stopped {
			entry.connectStats.add(1, connAdjust)
		} else {
			entry.connectStats.add(connAdjust, 1)
		}
	}

	entry.state = psNotConnected
	if entry.knownSelected {
		pool.knownSelected--
	} else {
		pool.newSelected--
	}
	pool.setRetryDial(entry)
	pool.connWg.Done()
}

const (
	pseBlockDelay = iota
	pseResponseTime
	pseResponseTimeout
)

// poolStatAdjust records are sent to adjust peer block delay/response time statistics
type poolStatAdjust struct {
	adjustType int
	entry      *poolEntry
	time       time.Duration
}

// adjustBlockDelay adjusts the block announce delay statistics of a node
func (pool *serverPool) adjustBlockDelay(entry *poolEntry, time time.Duration) {
	if entry == nil {
		return
	}
	pool.adjustStats <- poolStatAdjust{pseBlockDelay, entry, time}
}

// adjustResponseTime adjusts the request response time statistics of a node
func (pool *serverPool) adjustResponseTime(entry *poolEntry, time time.Duration, timeout bool) {
	if entry == nil {
		return
	}
	if timeout {
		pool.adjustStats <- poolStatAdjust{pseResponseTimeout, entry, time}
	} else {
		pool.adjustStats <- poolStatAdjust{pseResponseTime, entry, time}
	}
}
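
// exampleReportRequest is an illustrative sketch of how request statistics might
// be fed back into the pool. The surrounding request plumbing is hypothetical;
// only the adjustResponseTime call corresponds to this file. A timed out request
// only raises the timeout ratio, while a served one records its response time and
// a zero timeout sample (see the pseResponseTime case in eventLoop below).
func exampleReportRequest(pool *serverPool, entry *poolEntry, sentAt mclock.AbsTime, timedOut bool) {
	elapsed := time.Duration(mclock.Now() - sentAt)
	pool.adjustResponseTime(entry, elapsed, timedOut)
}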

// eventLoop handles pool events and mutex locking for all internal functions
func (pool *serverPool) eventLoop() {
	lookupCnt := 0
	var convTime mclock.AbsTime
	if pool.discSetPeriod != nil {
		pool.discSetPeriod <- time.Millisecond * 100
	}
	for {
		select {
		case entry := <-pool.timeout:
			pool.lock.Lock()
			if !entry.removed {
				pool.checkDialTimeout(entry)
			}
			pool.lock.Unlock()

		case entry := <-pool.enableRetry:
			pool.lock.Lock()
			if !entry.removed {
				entry.delayedRetry = false
				pool.updateCheckDial(entry)
			}
			pool.lock.Unlock()

		case adj := <-pool.adjustStats:
			pool.lock.Lock()
			switch adj.adjustType {
			case pseBlockDelay:
				adj.entry.delayStats.add(float64(adj.time), 1)
			case pseResponseTime:
				adj.entry.responseStats.add(float64(adj.time), 1)
				adj.entry.timeoutStats.add(0, 1)
			case pseResponseTimeout:
				adj.entry.timeoutStats.add(1, 1)
			}
			pool.lock.Unlock()

		case node := <-pool.discNodes:
			pool.lock.Lock()
			entry := pool.findOrNewNode(discover.NodeID(node.ID), node.IP, node.TCP)
			pool.updateCheckDial(entry)
			pool.lock.Unlock()

		case conv := <-pool.discLookups:
			if conv {
				if lookupCnt == 0 {
					convTime = mclock.Now()
				}
				lookupCnt++
				if pool.fastDiscover && (lookupCnt == 50 || time.Duration(mclock.Now()-convTime) > time.Minute) {
					pool.fastDiscover = false
					if pool.discSetPeriod != nil {
						pool.discSetPeriod <- time.Minute
					}
				}
			}

		case <-pool.quit:
			if pool.discSetPeriod != nil {
				close(pool.discSetPeriod)
			}
			pool.connWg.Wait()
			pool.saveNodes()
			pool.wg.Done()
			return

		}
	}
}

func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16) *poolEntry {
	now := mclock.Now()
	entry := pool.entries[id]
	if entry == nil {
		log.Debug("Discovered new entry", "id", id)
		entry = &poolEntry{
			id:         id,
			addr:       make(map[string]*poolEntryAddress),
			addrSelect: *newWeightedRandomSelect(),
			shortRetry: shortRetryCnt,
		}
		pool.entries[id] = entry
		// initialize previously unknown peers with good statistics to give a chance to prove themselves
		entry.connectStats.add(1, initStatsWeight)
		entry.delayStats.add(0, initStatsWeight)
		entry.responseStats.add(0, initStatsWeight)
		entry.timeoutStats.add(0, initStatsWeight)
	}
	entry.lastDiscovered = now
	addr := &poolEntryAddress{
		ip:   ip,
		port: port,
	}
	if a, ok := entry.addr[addr.strKey()]; ok {
		addr = a
	} else {
		entry.addr[addr.strKey()] = addr
	}
	addr.lastSeen = now
	entry.addrSelect.update(addr)
	if !entry.known {
		pool.newQueue.setLatest(entry)
	}
	return entry
}

// loadNodes loads known nodes and their statistics from the database
func (pool *serverPool) loadNodes() {
	enc, err := pool.db.Get(pool.dbKey)
	if err != nil {
		return
	}
	var list []*poolEntry
	err = rlp.DecodeBytes(enc, &list)
	if err != nil {
		log.Debug("Failed to decode node list", "err", err)
		return
	}
	for _, e := range list {
		log.Debug("Loaded server stats", "id", e.id, "fails", e.lastConnected.fails,
			"conn", fmt.Sprintf("%v/%v", e.connectStats.avg, e.connectStats.weight),
			"delay", fmt.Sprintf("%v/%v", time.Duration(e.delayStats.avg), e.delayStats.weight),
			"response", fmt.Sprintf("%v/%v", time.Duration(e.responseStats.avg), e.responseStats.weight),
			"timeout", fmt.Sprintf("%v/%v", e.timeoutStats.avg, e.timeoutStats.weight))
		pool.entries[e.id] = e
		pool.knownQueue.setLatest(e)
		pool.knownSelect.update((*knownEntry)(e))
	}
}

// saveNodes saves known nodes and their statistics into the database. Nodes are
// ordered from least to most recently connected.
func (pool *serverPool) saveNodes() {
	list := make([]*poolEntry, len(pool.knownQueue.queue))
	for i := range list {
		list[i] = pool.knownQueue.fetchOldest()
	}
	enc, err := rlp.EncodeToBytes(list)
	if err == nil {
		pool.db.Put(pool.dbKey, enc)
	}
}

// removeEntry removes a pool entry when the entry count limit is reached.
// Note that it is called by the new/known queues from which the entry has already
// been removed so removing it from the queues is not necessary.
func (pool *serverPool) removeEntry(entry *poolEntry) {
	pool.newSelect.remove((*discoveredEntry)(entry))
	pool.knownSelect.remove((*knownEntry)(entry))
	entry.removed = true
	delete(pool.entries, entry.id)
}

// setRetryDial starts the timer which will enable dialing a certain node again
func (pool *serverPool) setRetryDial(entry *poolEntry) {
	delay := longRetryDelay
	if entry.shortRetry > 0 {
		entry.shortRetry--
		delay = shortRetryDelay
	}
	delay += time.Duration(rand.Int63n(int64(delay) + 1))
	entry.delayedRetry = true
	go func() {
		select {
		case <-pool.quit:
		case <-time.After(delay):
			select {
			case <-pool.quit:
			case pool.enableRetry <- entry:
			}
		}
	}()
}

// updateCheckDial is called when an entry can potentially be dialed again. It updates
// its selection weights and checks if new dials can/should be made.
func (pool *serverPool) updateCheckDial(entry *poolEntry) {
	pool.newSelect.update((*discoveredEntry)(entry))
	pool.knownSelect.update((*knownEntry)(entry))
	pool.checkDial()
}

// checkDial checks if new dials can/should be made. It tries to select servers both
// based on good statistics and recent discovery.
func (pool *serverPool) checkDial() {
	fillWithKnownSelects := !pool.fastDiscover
	for pool.knownSelected < targetKnownSelect {
		entry := pool.knownSelect.choose()
		if entry == nil {
			fillWithKnownSelects = false
			break
		}
		pool.dial((*poolEntry)(entry.(*knownEntry)), true)
	}
	for pool.knownSelected+pool.newSelected < targetServerCount {
		entry := pool.newSelect.choose()
		if entry == nil {
			break
		}
		pool.dial((*poolEntry)(entry.(*discoveredEntry)), false)
	}
	if fillWithKnownSelects {
		// no more newly discovered nodes to select and since fast discover period
		// is over, we probably won't find more in the near future so select more
		// known entries if possible
		for pool.knownSelected < targetServerCount {
			entry := pool.knownSelect.choose()
			if entry == nil {
				break
			}
			pool.dial((*poolEntry)(entry.(*knownEntry)), true)
		}
	}
}

// dial initiates a new connection
func (pool *serverPool) dial(entry *poolEntry, knownSelected bool) {
	if entry.state != psNotConnected {
		return
	}
	entry.state = psDialed
	entry.knownSelected = knownSelected
	if knownSelected {
		pool.knownSelected++
	} else {
		pool.newSelected++
	}
	addr := entry.addrSelect.choose().(*poolEntryAddress)
	log.Debug("Dialing new peer", "lesaddr", entry.id.String()+"@"+addr.strKey(), "set", len(entry.addr), "known", knownSelected)
	entry.dialed = addr
	go func() {
		pool.server.AddPeer(discover.NewNode(entry.id, addr.ip, addr.port, addr.port))
		select {
		case <-pool.quit:
		case <-time.After(dialTimeout):
			select {
			case <-pool.quit:
			case pool.timeout <- entry:
			}
		}
	}()
}

// checkDialTimeout checks if the node is still in dialed state and if so, resets it
// and adjusts connection statistics accordingly.
func (pool *serverPool) checkDialTimeout(entry *poolEntry) {
	if entry.state != psDialed {
		return
	}
	log.Debug("Dial timeout", "lesaddr", entry.id.String()+"@"+entry.dialed.strKey())
	entry.state = psNotConnected
	if entry.knownSelected {
		pool.knownSelected--
	} else {
		pool.newSelected--
	}
	entry.connectStats.add(0, 1)
	entry.dialed.fails++
	pool.setRetryDial(entry)
}

const (
	psNotConnected = iota
	psDialed
	psConnected
	psRegistered
)

// poolEntry represents a server node and stores its current state and statistics.
type poolEntry struct {
	peer                  *peer
	id                    discover.NodeID
	addr                  map[string]*poolEntryAddress
	lastConnected, dialed *poolEntryAddress
	addrSelect            weightedRandomSelect

	lastDiscovered              mclock.AbsTime
	known, knownSelected        bool
	connectStats, delayStats    poolStats
	responseStats, timeoutStats poolStats
	state                       int
	regTime                     mclock.AbsTime
	queueIdx                    int
	removed                     bool

	delayedRetry bool
	shortRetry   int
}

func (e *poolEntry) EncodeRLP(w io.Writer) error {
	return rlp.Encode(w, []interface{}{e.id, e.lastConnected.ip, e.lastConnected.port, e.lastConnected.fails, &e.connectStats, &e.delayStats, &e.responseStats, &e.timeoutStats})
}

func (e *poolEntry) DecodeRLP(s *rlp.Stream) error {
	var entry struct {
		ID                         discover.NodeID
		IP                         net.IP
		Port                       uint16
		Fails                      uint
		CStat, DStat, RStat, TStat poolStats
	}
	if err := s.Decode(&entry); err != nil {
		return err
	}
	addr := &poolEntryAddress{ip: entry.IP, port: entry.Port, fails: entry.Fails, lastSeen: mclock.Now()}
	e.id = entry.ID
	e.addr = make(map[string]*poolEntryAddress)
	e.addr[addr.strKey()] = addr
	e.addrSelect = *newWeightedRandomSelect()
	e.addrSelect.update(addr)
	e.lastConnected = addr
	e.connectStats = entry.CStat
	e.delayStats = entry.DStat
	e.responseStats = entry.RStat
	e.timeoutStats = entry.TStat
	e.shortRetry = shortRetryCnt
	e.known = true
	return nil
}

// discoveredEntry implements wrsItem
type discoveredEntry poolEntry

// Weight calculates random selection weight for newly discovered entries
func (e *discoveredEntry) Weight() int64 {
	if e.state != psNotConnected || e.delayedRetry {
		return 0
	}
	t := time.Duration(mclock.Now() - e.lastDiscovered)
	if t <= discoverExpireStart {
		return 1000000000
	} else {
		return int64(1000000000 * math.Exp(-float64(t-discoverExpireStart)/float64(discoverExpireConst)))
	}
}

// knownEntry implements wrsItem
type knownEntry poolEntry

// Weight calculates random selection weight for known entries
func (e *knownEntry) Weight() int64 {
	if e.state != psNotConnected || !e.known || e.delayedRetry {
		return 0
	}
	return int64(1000000000 * e.connectStats.recentAvg() * math.Exp(-float64(e.lastConnected.fails)*failDropLn-e.responseStats.recentAvg()/float64(responseScoreTC)-e.delayStats.recentAvg()/float64(delayScoreTC)) * math.Pow((1-e.timeoutStats.recentAvg()), timeoutPow))
}
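
// exampleKnownWeight is an illustrative sketch (not used by the pool) of the
// Weight formula above with hypothetical statistics plugged in: a node with a
// perfect connection history, no address failures, a 100ms average response
// time, no block delay and a 10% timeout ratio. The response term contributes
// exp(-1) and the timeout term 0.9^10, so the result is roughly 1.28e8 out of
// the maximum 1e9.
func exampleKnownWeight() int64 {
	const (
		connectAvg = 1.0                             // connectStats.recentAvg()
		fails      = 0.0                             // lastConnected.fails
		response   = float64(100 * time.Millisecond) // responseStats.recentAvg()
		delay      = 0.0                             // delayStats.recentAvg()
		timeoutAvg = 0.1                             // timeoutStats.recentAvg()
	)
	return int64(1000000000 * connectAvg * math.Exp(-fails*failDropLn-response/float64(responseScoreTC)-delay/float64(delayScoreTC)) * math.Pow(1-timeoutAvg, timeoutPow))
}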

// poolEntryAddress is a separate object because currently it is necessary to remember
// multiple potential network addresses for a pool entry. This will be removed after
// the final implementation of v5 discovery which will retrieve signed and serial
// numbered advertisements, making it clear which IP/port is the latest one.
type poolEntryAddress struct {
	ip       net.IP
	port     uint16
	lastSeen mclock.AbsTime // last time it was discovered, connected or loaded from db
	fails    uint           // connection failures since last successful connection (persistent)
}

func (a *poolEntryAddress) Weight() int64 {
	t := time.Duration(mclock.Now() - a.lastSeen)
	return int64(1000000*math.Exp(-float64(t)/float64(discoverExpireConst)-float64(a.fails)*addrFailDropLn)) + 1
}

func (a *poolEntryAddress) strKey() string {
	return a.ip.String() + ":" + strconv.Itoa(int(a.port))
}

// poolStats implements statistics for a certain quantity with a long term average
// and a short term value which is adjusted exponentially with a factor of
// pstatRecentAdjust with each update and also returned exponentially to the
// average with the time constant pstatReturnToMeanTC
type poolStats struct {
	sum, weight, avg, recent float64
	lastRecalc               mclock.AbsTime
}

// init initializes stats with a long term sum/update count pair retrieved from the database
func (s *poolStats) init(sum, weight float64) {
	s.sum = sum
	s.weight = weight
	var avg float64
	if weight > 0 {
		avg = s.sum / weight
	}
	s.avg = avg
	s.recent = avg
	s.lastRecalc = mclock.Now()
}

// recalc recalculates the return-to-mean of the recent value and the long term average
func (s *poolStats) recalc() {
	now := mclock.Now()
	s.recent = s.avg + (s.recent-s.avg)*math.Exp(-float64(now-s.lastRecalc)/float64(pstatReturnToMeanTC))
	if s.sum == 0 {
		s.avg = 0
	} else {
		if s.sum > s.weight*1e30 {
			s.avg = 1e30
		} else {
			s.avg = s.sum / s.weight
		}
	}
	s.lastRecalc = now
}

// add updates the stats with a new value
func (s *poolStats) add(value, weight float64) {
	s.weight += weight
	s.sum += value * weight
	s.recalc()
}

// recentAvg returns the short-term adjusted average
func (s *poolStats) recentAvg() float64 {
	s.recalc()
	return s.recent
}

func (s *poolStats) EncodeRLP(w io.Writer) error {
	return rlp.Encode(w, []interface{}{math.Float64bits(s.sum), math.Float64bits(s.weight)})
}

func (s *poolStats) DecodeRLP(st *rlp.Stream) error {
	var stats struct {
		SumUint, WeightUint uint64
	}
	if err := st.Decode(&stats); err != nil {
		return err
	}
	s.init(math.Float64frombits(stats.SumUint), math.Float64frombits(stats.WeightUint))
	return nil
}
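
// examplePoolStatsRLP is a minimal sketch of how poolStats round-trips through
// RLP: only the bit patterns of sum and weight are persisted, so after decoding
// the short term value starts out equal to the long term average (see init
// above). It is illustrative only and not used by the pool.
func examplePoolStatsRLP() (float64, error) {
	var s poolStats
	s.init(3, 10) // long term average 0.3
	enc, err := rlp.EncodeToBytes(&s)
	if err != nil {
		return 0, err
	}
	var decoded poolStats
	if err := rlp.DecodeBytes(enc, &decoded); err != nil {
		return 0, err
	}
	return decoded.recentAvg(), nil // approximately 0.3 right after loading
}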

// poolEntryQueue keeps track of its least recently accessed entries and removes
// them when the number of entries reaches the limit
type poolEntryQueue struct {
	queue                  map[int]*poolEntry // known nodes indexed by their latest lastConnCnt value
	newPtr, oldPtr, maxCnt int
	removeFromPool         func(*poolEntry)
}

// newPoolEntryQueue returns a new poolEntryQueue
func newPoolEntryQueue(maxCnt int, removeFromPool func(*poolEntry)) poolEntryQueue {
	return poolEntryQueue{queue: make(map[int]*poolEntry), maxCnt: maxCnt, removeFromPool: removeFromPool}
}

// fetchOldest returns and removes the least recently accessed entry
func (q *poolEntryQueue) fetchOldest() *poolEntry {
	if len(q.queue) == 0 {
		return nil
	}
	for {
		if e := q.queue[q.oldPtr]; e != nil {
			delete(q.queue, q.oldPtr)
			q.oldPtr++
			return e
		}
		q.oldPtr++
	}
}

// remove removes an entry from the queue
func (q *poolEntryQueue) remove(entry *poolEntry) {
	if q.queue[entry.queueIdx] == entry {
		delete(q.queue, entry.queueIdx)
	}
}

// setLatest adds or updates a recently accessed entry. It also checks if an old entry
// needs to be removed and removes it from the parent pool too with a callback function.
func (q *poolEntryQueue) setLatest(entry *poolEntry) {
	if q.queue[entry.queueIdx] == entry {
		delete(q.queue, entry.queueIdx)
	} else {
		if len(q.queue) == q.maxCnt {
			e := q.fetchOldest()
			q.remove(e)
			q.removeFromPool(e)
		}
	}
	entry.queueIdx = q.newPtr
	q.queue[entry.queueIdx] = entry
	q.newPtr++
}
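
// examplePoolEntryQueue is a minimal sketch of the eviction behaviour described
// above: once maxCnt entries are present, setLatest drops the least recently
// accessed entry through the removeFromPool callback. The entries and callback
// below are hypothetical; the sketch is not used by the pool.
func examplePoolEntryQueue() {
	evicted := 0
	q := newPoolEntryQueue(2, func(*poolEntry) { evicted++ })
	a, b, c := &poolEntry{}, &poolEntry{}, &poolEntry{}
	q.setLatest(a)
	q.setLatest(b)
	q.setLatest(c) // capacity is 2, so a is evicted and the callback fires
	_ = evicted    // evicted == 1 at this point
}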