github.com/klaytn/klaytn@v1.12.1/datasync/downloader/peer.go

// Modifications Copyright 2018 The klaytn Authors
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from eth/downloader/peer.go (2018/06/04).
// Modified and improved for the klaytn development.

package downloader

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/event"
	"github.com/klaytn/klaytn/log"
)

const (
	maxLackingHashes  = 4096 // Maximum number of entries allowed on the list of lacking items
	measurementImpact = 0.1  // The impact a single measurement has on a peer's final throughput value.
)

var (
	errAlreadyFetching   = errors.New("already fetching blocks from peer")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)
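// Illustrative sketch (not part of the original file): with measurementImpact
// = 0.1, each new sample nudges the running throughput estimate by 10%. For
// example, if a peer's blockThroughput is 100 blocks/s and a fresh measurement
// comes in at 200 blocks/s, the updated estimate would be
//
//	0.9*100 + 0.1*200 = 110 blocks/s
//
// so a single outlier cannot swing the peer's reputation dramatically.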
// peerConnection represents an active peer from which hashes and blocks are retrieved.
type peerConnection struct {
	id string // Unique identifier of the peer

	headerIdle      int32 // Current header activity state of the peer (idle = 0, active = 1)
	blockIdle       int32 // Current block activity state of the peer (idle = 0, active = 1)
	receiptIdle     int32 // Current receipt activity state of the peer (idle = 0, active = 1)
	stakingInfoIdle int32 // Current staking info activity state of the peer (idle = 0, active = 1)
	stateIdle       int32 // Current node data activity state of the peer (idle = 0, active = 1)

	headerThroughput      float64 // Number of headers measured to be retrievable per second
	blockThroughput       float64 // Number of blocks (bodies) measured to be retrievable per second
	receiptThroughput     float64 // Number of receipts measured to be retrievable per second
	stakingInfoThroughput float64 // Number of staking info measured to be retrievable per second
	stateThroughput       float64 // Number of node data pieces measured to be retrievable per second

	rtt time.Duration // Request round trip time to track responsiveness (QoS)

	headerStarted      time.Time // Time instance when the last header fetch was started
	blockStarted       time.Time // Time instance when the last block (body) fetch was started
	receiptStarted     time.Time // Time instance when the last receipt fetch was started
	stakingInfoStarted time.Time // Time instance when the last staking info fetch was started
	stateStarted       time.Time // Time instance when the last node data fetch was started

	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)

	peer Peer

	version int        // Klaytn protocol version number to switch strategies
	logger  log.Logger // Contextual logger to add extra infos to peer logs
	lock    sync.RWMutex
}

// LightPeer encapsulates the methods required to synchronise with a remote light peer.
type LightPeer interface {
	Head() (common.Hash, *big.Int)
	RequestHeadersByHash(common.Hash, int, int, bool) error
	RequestHeadersByNumber(uint64, int, int, bool) error
}

// Peer encapsulates the methods required to synchronise with a remote full peer.
type Peer interface {
	LightPeer
	RequestBodies([]common.Hash) error
	RequestReceipts([]common.Hash) error
	RequestStakingInfo([]common.Hash) error
	RequestNodeData([]common.Hash) error
}
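// Illustrative sketch (not part of the original file): the Peer interface can
// be satisfied by a lightweight test stub; every name below is hypothetical.
//
//	type stubPeer struct {
//		head common.Hash
//		td   *big.Int
//	}
//
//	func (s *stubPeer) Head() (common.Hash, *big.Int)                           { return s.head, s.td }
//	func (s *stubPeer) RequestHeadersByHash(common.Hash, int, int, bool) error  { return nil }
//	func (s *stubPeer) RequestHeadersByNumber(uint64, int, int, bool) error     { return nil }
//	func (s *stubPeer) RequestBodies([]common.Hash) error                       { return nil }
//	func (s *stubPeer) RequestReceipts([]common.Hash) error                     { return nil }
//	func (s *stubPeer) RequestStakingInfo([]common.Hash) error                  { return nil }
//	func (s *stubPeer) RequestNodeData([]common.Hash) error                     { return nil }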
// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods.
type lightPeerWrapper struct {
	peer LightPeer
}

func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() }

func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error {
	return w.peer.RequestHeadersByHash(h, amount, skip, reverse)
}

func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error {
	return w.peer.RequestHeadersByNumber(i, amount, skip, reverse)
}

func (w *lightPeerWrapper) RequestBodies([]common.Hash) error {
	panic("RequestBodies not supported in light client mode sync")
}

func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error {
	panic("RequestReceipts not supported in light client mode sync")
}

func (w *lightPeerWrapper) RequestStakingInfo([]common.Hash) error {
	panic("RequestStakingInfo not supported in light client mode sync")
}

func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error {
	panic("RequestNodeData not supported in light client mode sync")
}

// newPeerConnection creates a new downloader peer.
func newPeerConnection(id string, version int, peer Peer, logger log.Logger) *peerConnection {
	return &peerConnection{
		id:      id,
		lacking: make(map[common.Hash]struct{}),

		peer: peer,

		version: version,
		logger:  logger,
	}
}

// Reset clears the internal state of a peer entity.
func (p *peerConnection) Reset() {
	p.lock.Lock()
	defer p.lock.Unlock()

	atomic.StoreInt32(&p.headerIdle, 0)
	atomic.StoreInt32(&p.blockIdle, 0)
	atomic.StoreInt32(&p.receiptIdle, 0)
	atomic.StoreInt32(&p.stateIdle, 0)
	atomic.StoreInt32(&p.stakingInfoIdle, 0)

	p.headerThroughput = 0
	p.blockThroughput = 0
	p.receiptThroughput = 0
	p.stakingInfoThroughput = 0
	p.stateThroughput = 0

	p.lacking = make(map[common.Hash]struct{})
}

// FetchHeaders sends a header retrieval request to the remote peer.
func (p *peerConnection) FetchHeaders(from uint64, count int) error {
	// Sanity check the protocol version
	if p.version < 62 {
		panic(fmt.Sprintf("header fetch [klay/62+] requested on klay/%d", p.version))
	}
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.headerStarted = time.Now()

	// Issue the header retrieval request (absolute upwards without gaps)
	go p.peer.RequestHeadersByNumber(from, count, 0, false)

	return nil
}

// FetchBodies sends a block body retrieval request to the remote peer.
func (p *peerConnection) FetchBodies(request *fetchRequest) error {
	// Sanity check the protocol version
	if p.version < 62 {
		panic(fmt.Sprintf("body fetch [klay/62+] requested on klay/%d", p.version))
	}
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.blockStarted = time.Now()

	// Convert the header set to a retrievable slice
	hashes := make([]common.Hash, 0, len(request.Headers))
	for _, header := range request.Headers {
		hashes = append(hashes, header.Hash())
	}
	go p.peer.RequestBodies(hashes)

	return nil
}
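// Illustrative sketch (not part of the original file): the CompareAndSwap
// guard in the Fetch* methods means that of two concurrent fetch attempts
// against the same peer, exactly one wins; the loser gets errAlreadyFetching
// and should schedule the request on another peer. Values are hypothetical.
//
//	if err := pc.FetchHeaders(4096, 192); err == errAlreadyFetching {
//		// a request is already in flight on this peer; pick a different one
//	}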
// FetchReceipts sends a receipt retrieval request to the remote peer.
func (p *peerConnection) FetchReceipts(request *fetchRequest) error {
	// Sanity check the protocol version
	if p.version < 63 {
		panic(fmt.Sprintf("receipt fetch [klay/63+] requested on klay/%d", p.version))
	}
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.receiptStarted = time.Now()

	// Convert the header set to a retrievable slice
	hashes := make([]common.Hash, 0, len(request.Headers))
	for _, header := range request.Headers {
		hashes = append(hashes, header.Hash())
	}
	go p.peer.RequestReceipts(hashes)

	return nil
}

// FetchStakingInfo sends a staking information retrieval request to the remote peer.
func (p *peerConnection) FetchStakingInfo(request *fetchRequest) error {
	// Sanity check the protocol version
	if p.version < 65 {
		panic(fmt.Sprintf("staking info fetch [klay/65+] requested on klay/%d", p.version))
	}
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.stakingInfoIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.stakingInfoStarted = time.Now()

	// Convert the header set to a retrievable slice
	hashes := make([]common.Hash, 0, len(request.Headers))
	for _, header := range request.Headers {
		hashes = append(hashes, header.Hash())
	}
	go p.peer.RequestStakingInfo(hashes)

	return nil
}

// FetchNodeData sends a node state data retrieval request to the remote peer.
func (p *peerConnection) FetchNodeData(hashes []common.Hash) error {
	// Sanity check the protocol version
	if p.version < 63 {
		panic(fmt.Sprintf("node data fetch [klay/63+] requested on klay/%d", p.version))
	}
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.stateStarted = time.Now()

	go p.peer.RequestNodeData(hashes)

	return nil
}

// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval
// requests. Its estimated header retrieval throughput is updated with that measured
// just now.
func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) {
	p.setIdle(deliveryTime.Sub(p.headerStarted), delivered, &p.headerThroughput, &p.headerIdle)
}

// SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval
// requests. Its estimated block retrieval throughput is updated with that measured
// just now.
func (p *peerConnection) SetBlocksIdle(delivered int, deliveryTime time.Time) {
	p.setIdle(deliveryTime.Sub(p.blockStarted), delivered, &p.blockThroughput, &p.blockIdle)
}

// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval
// requests. Its estimated body retrieval throughput is updated with that measured
// just now.
func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) {
	p.setIdle(deliveryTime.Sub(p.blockStarted), delivered, &p.blockThroughput, &p.blockIdle)
}
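// Illustrative sketch (not part of the original file): a delivery handler
// would typically re-idle the peer with the delivered item count and arrival
// time, so the elapsed fetch duration feeds the throughput estimate.
// Hypothetical use:
//
//	// headers just received from this peer
//	pc.SetHeadersIdle(len(headers), time.Now())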
// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
// retrieval requests. Its estimated receipt retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) {
	p.setIdle(deliveryTime.Sub(p.receiptStarted), delivered, &p.receiptThroughput, &p.receiptIdle)
}

// SetStakingInfoIdle sets the peer to idle, allowing it to execute new staking info
// retrieval requests. Its estimated staking info retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetStakingInfoIdle(delivered int, deliveryTime time.Time) {
	p.setIdle(deliveryTime.Sub(p.stakingInfoStarted), delivered, &p.stakingInfoThroughput, &p.stakingInfoIdle)
}

// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
// data retrieval requests. Its estimated state retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) {
	p.setIdle(deliveryTime.Sub(p.stateStarted), delivered, &p.stateThroughput, &p.stateIdle)
}

// setIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its estimated retrieval throughput is updated with that measured just now.
func (p *peerConnection) setIdle(elapsed time.Duration, delivered int, throughput *float64, idle *int32) {
	// Irrelevant of the scaling, make sure the peer ends up idle
	defer atomic.StoreInt32(idle, 0)

	p.lock.Lock()
	defer p.lock.Unlock()

	// If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum
	if delivered == 0 {
		*throughput = 0
		return
	}
	// Otherwise update the throughput with a new measurement
	if elapsed <= 0 {
		elapsed = 1 // +1 (ns) to ensure non-zero divisor
	}
	measured := float64(delivered) / (float64(elapsed) / float64(time.Second))

	*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured
	p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed))

	p.logger.Trace("Peer throughput measurements updated",
		"hps", p.headerThroughput, "bps", p.blockThroughput,
		"rps", p.receiptThroughput, "sps", p.stateThroughput,
		"miss", len(p.lacking), "rtt", p.rtt)
}

// HeaderCapacity retrieves the peer's header download allowance based on its
// previously discovered throughput.
func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch)))
}

// BlockCapacity retrieves the peer's block download allowance based on its
// previously discovered throughput.
func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch)))
}

// ReceiptCapacity retrieves the peer's receipt download allowance based on its
// previously discovered throughput.
func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch)))
}
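// Illustrative sketch (not part of the original file): the capacity formula
// sizes a request to what the peer can serve within targetRTT. With a
// hypothetical blockThroughput of 150 blocks/s and a targetRTT of 500ms:
//
//	capacity = min(1 + max(1, 150*0.5), MaxBlockFetch) = min(76, MaxBlockFetch)
//
// so a faster peer is handed proportionally larger batches, capped by the
// per-request maximum.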
// StakingInfoCapacity retrieves the peer's staking info download allowance based on its
// previously discovered throughput.
func (p *peerConnection) StakingInfoCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	return int(math.Min(1+math.Max(1, p.stakingInfoThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStakingInfoFetch)))
}

// NodeDataCapacity retrieves the peer's state download allowance based on its
// previously discovered throughput.
func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch)))
}

// MarkLacking appends a new entity to the set of items (blocks, receipts, states)
// that a peer is known not to have (i.e. have been requested before). If the
// set reaches its maximum allowed capacity, items are randomly dropped off.
func (p *peerConnection) MarkLacking(hash common.Hash) {
	p.lock.Lock()
	defer p.lock.Unlock()

	for len(p.lacking) >= maxLackingHashes {
		for drop := range p.lacking {
			delete(p.lacking, drop)
			break
		}
	}
	p.lacking[hash] = struct{}{}
}

// Lacks retrieves whether the hash of a blockchain item is on the peer's lacking
// list (i.e. whether we know that the peer does not have it).
func (p *peerConnection) Lacks(hash common.Hash) bool {
	p.lock.RLock()
	defer p.lock.RUnlock()

	_, ok := p.lacking[hash]
	return ok
}

// peerSet represents the collection of active peers participating in the chain
// download procedure.
type peerSet struct {
	peers        map[string]*peerConnection
	newPeerFeed  event.Feed
	peerDropFeed event.Feed
	lock         sync.RWMutex
}

// newPeerSet creates a new peer set to track the active download sources.
func newPeerSet() *peerSet {
	return &peerSet{
		peers: make(map[string]*peerConnection),
	}
}

// SubscribeNewPeers subscribes to peer arrival events.
func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription {
	return ps.newPeerFeed.Subscribe(ch)
}

// SubscribePeerDrops subscribes to peer departure events.
func (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription {
	return ps.peerDropFeed.Subscribe(ch)
}

// Reset iterates over the current peer set, and resets each of the known peers
// to prepare for a next batch of block retrieval.
func (ps *peerSet) Reset() {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	for _, peer := range ps.peers {
		peer.Reset()
	}
}
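// Illustrative sketch (not part of the original file): a consumer can watch
// peer arrivals through the event.Feed subscription; the channel size is a
// hypothetical choice.
//
//	ch := make(chan *peerConnection, 16)
//	sub := ps.SubscribeNewPeers(ch)
//	defer sub.Unsubscribe()
//	for p := range ch {
//		_ = p // react to the newly registered peer
//	}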
// Register injects a new peer into the working set, or returns an error if the
// peer is already known.
//
// The method also sets the starting throughput values of the new peer to the
// average of all existing peers, to give it a realistic chance of being used
// for data retrievals.
func (ps *peerSet) Register(p *peerConnection) error {
	// Retrieve the current median RTT as a sane default
	p.rtt = ps.medianRTT()

	// Register the new peer with some meaningful defaults
	ps.lock.Lock()
	if _, ok := ps.peers[p.id]; ok {
		ps.lock.Unlock()
		return errAlreadyRegistered
	}
	if len(ps.peers) > 0 {
		p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stakingInfoThroughput, p.stateThroughput = 0, 0, 0, 0, 0

		for _, peer := range ps.peers {
			peer.lock.RLock()
			p.headerThroughput += peer.headerThroughput
			p.blockThroughput += peer.blockThroughput
			p.receiptThroughput += peer.receiptThroughput
			p.stakingInfoThroughput += peer.stakingInfoThroughput
			p.stateThroughput += peer.stateThroughput
			peer.lock.RUnlock()
		}
		peerLengthFloat := float64(len(ps.peers))
		p.headerThroughput /= peerLengthFloat
		p.blockThroughput /= peerLengthFloat
		p.receiptThroughput /= peerLengthFloat
		p.stakingInfoThroughput /= peerLengthFloat
		p.stateThroughput /= peerLengthFloat
	}
	ps.peers[p.id] = p
	ps.lock.Unlock()

	ps.newPeerFeed.Send(p)
	return nil
}

// Unregister removes a remote peer from the active set, disabling any further
// actions to/from that particular entity.
func (ps *peerSet) Unregister(id string) error {
	ps.lock.Lock()
	p, ok := ps.peers[id]
	if !ok {
		defer ps.lock.Unlock()
		return errNotRegistered
	}
	delete(ps.peers, id)
	ps.lock.Unlock()

	ps.peerDropFeed.Send(p)
	return nil
}

// Peer retrieves the registered peer with the given id.
func (ps *peerSet) Peer(id string) *peerConnection {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	return ps.peers[id]
}

// Len returns the current number of peers in the set.
func (ps *peerSet) Len() int {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	return len(ps.peers)
}

// AllPeers retrieves a flat list of all the peers within the set.
func (ps *peerSet) AllPeers() []*peerConnection {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	list := make([]*peerConnection, 0, len(ps.peers))
	for _, p := range ps.peers {
		list = append(list, p)
	}
	return list
}

// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
// within the active peer set, ordered by their reputation.
func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
	idleCheck := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.headerIdle) == 0
	}
	throughput := func(p *peerConnection) float64 {
		p.lock.RLock()
		defer p.lock.RUnlock()
		return p.headerThroughput
	}
	return ps.idlePeers(62, 65, idleCheck, throughput)
}

// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
// the active peer set, ordered by their reputation.
func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
	idleCheck := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.blockIdle) == 0
	}
	throughput := func(p *peerConnection) float64 {
		p.lock.RLock()
		defer p.lock.RUnlock()
		return p.blockThroughput
	}
	return ps.idlePeers(62, 65, idleCheck, throughput)
}
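// Illustrative sketch (not part of the original file): Register bootstraps a
// newcomer with the mean of the existing peers' estimates. With hypothetical
// peers measuring 100, 200 and 300 headers/s, a newly registered peer starts
// at (100+200+300)/3 = 200 headers/s rather than zero, so it is immediately
// eligible for work instead of being starved by the throughput ordering.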
// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
// within the active peer set, ordered by their reputation.
func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
	idleCheck := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.receiptIdle) == 0
	}
	throughput := func(p *peerConnection) float64 {
		p.lock.RLock()
		defer p.lock.RUnlock()
		return p.receiptThroughput
	}
	return ps.idlePeers(63, 65, idleCheck, throughput)
}

// StakingInfoIdlePeers retrieves a flat list of all the currently staking-info-idle
// peers within the active peer set, ordered by their reputation.
func (ps *peerSet) StakingInfoIdlePeers() ([]*peerConnection, int) {
	idleCheck := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.stakingInfoIdle) == 0
	}
	throughput := func(p *peerConnection) float64 {
		p.lock.RLock()
		defer p.lock.RUnlock()
		return p.stakingInfoThroughput
	}
	return ps.idlePeers(65, 65, idleCheck, throughput)
}

// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
// peers within the active peer set, ordered by their reputation.
func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
	idleCheck := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.stateIdle) == 0
	}
	throughput := func(p *peerConnection) float64 {
		p.lock.RLock()
		defer p.lock.RUnlock()
		return p.stateThroughput
	}
	return ps.idlePeers(63, 65, idleCheck, throughput)
}

// TODO-Klaytn-Downloader when idlePeers is called magic numbers are used for minProtocol and maxProtocol. Use a constant instead.
// idlePeers retrieves a flat list of all currently idle peers satisfying the
// protocol version constraints, using the provided function to check idleness.
// The resulting set of peers is sorted by their measured throughput.
func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerConnection) bool, throughput func(*peerConnection) float64) ([]*peerConnection, int) {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	idlePeers, numTotalPeers := make([]*peerConnection, 0, len(ps.peers)), 0
	tps := make([]float64, 0, len(ps.peers))
	for _, p := range ps.peers {
		if p.version >= minProtocol && p.version <= maxProtocol {
			if idleCheck(p) {
				idlePeers = append(idlePeers, p)
				tps = append(tps, throughput(p))
			}
			numTotalPeers++
		}
	}
	// sort peers in the descending order of throughput
	sortPeers := &peerThroughputSort{idlePeers, tps}
	sort.Sort(sortPeers)
	return sortPeers.p, numTotalPeers
}
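// Illustrative sketch (not part of the original file): idlePeers returns both
// the idle subset and the total number of version-eligible peers, so callers
// can tell "everyone is busy" apart from "nobody speaks this protocol".
// Hypothetical use:
//
//	idle, total := ps.ReceiptIdlePeers()
//	if total == 0 {
//		// no klay/63+ peers at all; receipts cannot be fetched yet
//	} else if len(idle) == 0 {
//		// eligible peers exist but all are busy; retry later
//	}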
// medianRTT returns the median RTT of the peerset, considering only the tuning
// peers if there are more peers available.
func (ps *peerSet) medianRTT() time.Duration {
	// Gather all the currently measured round trip times
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	rtts := make([]float64, 0, len(ps.peers))
	for _, p := range ps.peers {
		p.lock.RLock()
		rtts = append(rtts, float64(p.rtt))
		p.lock.RUnlock()
	}
	sort.Float64s(rtts)

	median := rttMaxEstimate
	if qosTuningPeers <= len(rtts) {
		median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers
	} else if len(rtts) > 0 {
		median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (even like this, we maintain some baseline QoS)
	}
	// Restrict the RTT into some QoS defaults, irrelevant of true RTT
	if median < rttMinEstimate {
		median = rttMinEstimate
	}
	if median > rttMaxEstimate {
		median = rttMaxEstimate
	}
	return median
}

// peerThroughputSort implements sort.Interface, allowing a set of peers to be
// sorted by their throughput. The sorted data has the _highest_ throughput first.
type peerThroughputSort struct {
	p  []*peerConnection
	tp []float64
}

func (ps *peerThroughputSort) Len() int {
	return len(ps.p)
}

func (ps *peerThroughputSort) Less(i, j int) bool {
	return ps.tp[i] > ps.tp[j]
}

func (ps *peerThroughputSort) Swap(i, j int) {
	ps.p[i], ps.p[j] = ps.p[j], ps.p[i]
	ps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i]
}
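// Illustrative sketch (not part of the original file): because Less compares
// with ">", sort.Sort yields descending throughput; the peers and values
// below are hypothetical.
//
//	s := &peerThroughputSort{
//		p:  []*peerConnection{peerA, peerB, peerC},
//		tp: []float64{10, 30, 20},
//	}
//	sort.Sort(s)
//	// s.tp is now [30, 20, 10], with s.p reordered in lockstep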