// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the active peer-set of the downloader, maintaining both failures
// as well as reputation metrics to prioritize the block retrievals.

package downloader

import (
	"errors"
	"math"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
)

const (
	maxLackingHashes  = 4096 // Maximum number of entries allowed on the list of lacking items
	measurementImpact = 0.1  // The impact a single measurement has on a peer's final throughput value.
)

var (
	errAlreadyFetching   = errors.New("already fetching blocks from peer")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)

// peerConnection represents an active peer from which hashes and blocks are retrieved.
type peerConnection struct {
	id string // Unique identifier of the peer

	headerIdle  int32 // Current header activity state of the peer (idle = 0, active = 1)
	blockIdle   int32 // Current block activity state of the peer (idle = 0, active = 1)
	receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
	stateIdle   int32 // Current node data activity state of the peer (idle = 0, active = 1)

	headerThroughput  float64 // Number of headers measured to be retrievable per second
	blockThroughput   float64 // Number of blocks (bodies) measured to be retrievable per second
	receiptThroughput float64 // Number of receipts measured to be retrievable per second
	stateThroughput   float64 // Number of node data pieces measured to be retrievable per second

	rtt time.Duration // Request round trip time to track responsiveness (QoS)

	headerStarted  time.Time // Time instance when the last header fetch was started
	blockStarted   time.Time // Time instance when the last block (body) fetch was started
	receiptStarted time.Time // Time instance when the last receipt fetch was started
	stateStarted   time.Time // Time instance when the last node data fetch was started

	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)

	peer Peer // Remote peer used to issue the actual retrieval requests

	version int          // Eth protocol version number to switch strategies
	log     log.Logger   // Contextual logger to add extra infos to peer logs
	lock    sync.RWMutex // Guards the throughput/rtt/lacking fields above
}

// LightPeer encapsulates the methods required to synchronise with a remote light peer.
type LightPeer interface {
	Head() (common.Hash, *big.Int)
	RequestHeadersByHash(common.Hash, int, int, bool) error
	RequestHeadersByNumber(uint64, int, int, bool) error
}

// Peer encapsulates the methods required to synchronise with a remote full peer.
85 type Peer interface { 86 LightPeer 87 RequestBodies([]common.Hash) error 88 RequestReceipts([]common.Hash) error 89 RequestNodeData([]common.Hash) error 90 } 91 92 // lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods. 93 type lightPeerWrapper struct { 94 peer LightPeer 95 } 96 97 func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() } 98 func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error { 99 return w.peer.RequestHeadersByHash(h, amount, skip, reverse) 100 } 101 func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error { 102 return w.peer.RequestHeadersByNumber(i, amount, skip, reverse) 103 } 104 func (w *lightPeerWrapper) RequestBodies([]common.Hash) error { 105 panic("RequestBodies not supported in light client mode sync") 106 } 107 func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error { 108 panic("RequestReceipts not supported in light client mode sync") 109 } 110 func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error { 111 panic("RequestNodeData not supported in light client mode sync") 112 } 113 114 // newPeerConnection creates a new downloader peer. 115 func newPeerConnection(id string, version int, peer Peer, logger log.Logger) *peerConnection { 116 return &peerConnection{ 117 id: id, 118 lacking: make(map[common.Hash]struct{}), 119 peer: peer, 120 version: version, 121 log: logger, 122 } 123 } 124 125 // Reset clears the internal state of a peer entity. 
126 func (p *peerConnection) Reset() { 127 p.lock.Lock() 128 defer p.lock.Unlock() 129 130 atomic.StoreInt32(&p.headerIdle, 0) 131 atomic.StoreInt32(&p.blockIdle, 0) 132 atomic.StoreInt32(&p.receiptIdle, 0) 133 atomic.StoreInt32(&p.stateIdle, 0) 134 135 p.headerThroughput = 0 136 p.blockThroughput = 0 137 p.receiptThroughput = 0 138 p.stateThroughput = 0 139 140 p.lacking = make(map[common.Hash]struct{}) 141 } 142 143 // FetchHeaders sends a header retrieval request to the remote peer. 144 func (p *peerConnection) FetchHeaders(from uint64, count int) error { 145 // Short circuit if the peer is already fetching 146 if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) { 147 return errAlreadyFetching 148 } 149 p.headerStarted = time.Now() 150 151 // Issue the header retrieval request (absolute upwards without gaps) 152 go p.peer.RequestHeadersByNumber(from, count, 0, false) 153 154 return nil 155 } 156 157 // FetchBodies sends a block body retrieval request to the remote peer. 158 func (p *peerConnection) FetchBodies(request *fetchRequest) error { 159 // Short circuit if the peer is already fetching 160 if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { 161 return errAlreadyFetching 162 } 163 p.blockStarted = time.Now() 164 165 go func() { 166 // Convert the header set to a retrievable slice 167 hashes := make([]common.Hash, 0, len(request.Headers)) 168 for _, header := range request.Headers { 169 hashes = append(hashes, header.Hash()) 170 } 171 p.peer.RequestBodies(hashes) 172 }() 173 174 return nil 175 } 176 177 // FetchReceipts sends a receipt retrieval request to the remote peer. 
178 func (p *peerConnection) FetchReceipts(request *fetchRequest) error { 179 // Short circuit if the peer is already fetching 180 if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { 181 return errAlreadyFetching 182 } 183 p.receiptStarted = time.Now() 184 185 go func() { 186 // Convert the header set to a retrievable slice 187 hashes := make([]common.Hash, 0, len(request.Headers)) 188 for _, header := range request.Headers { 189 hashes = append(hashes, header.Hash()) 190 } 191 p.peer.RequestReceipts(hashes) 192 }() 193 194 return nil 195 } 196 197 // FetchNodeData sends a node state data retrieval request to the remote peer. 198 func (p *peerConnection) FetchNodeData(hashes []common.Hash) error { 199 // Short circuit if the peer is already fetching 200 if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { 201 return errAlreadyFetching 202 } 203 p.stateStarted = time.Now() 204 205 go p.peer.RequestNodeData(hashes) 206 207 return nil 208 } 209 210 // SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval 211 // requests. Its estimated header retrieval throughput is updated with that measured 212 // just now. 213 func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) { 214 p.setIdle(deliveryTime.Sub(p.headerStarted), delivered, &p.headerThroughput, &p.headerIdle) 215 } 216 217 // SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval 218 // requests. Its estimated body retrieval throughput is updated with that measured 219 // just now. 220 func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) { 221 p.setIdle(deliveryTime.Sub(p.blockStarted), delivered, &p.blockThroughput, &p.blockIdle) 222 } 223 224 // SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt 225 // retrieval requests. Its estimated receipt retrieval throughput is updated 226 // with that measured just now. 
227 func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) { 228 p.setIdle(deliveryTime.Sub(p.receiptStarted), delivered, &p.receiptThroughput, &p.receiptIdle) 229 } 230 231 // SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie 232 // data retrieval requests. Its estimated state retrieval throughput is updated 233 // with that measured just now. 234 func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) { 235 p.setIdle(deliveryTime.Sub(p.stateStarted), delivered, &p.stateThroughput, &p.stateIdle) 236 } 237 238 // setIdle sets the peer to idle, allowing it to execute new retrieval requests. 239 // Its estimated retrieval throughput is updated with that measured just now. 240 func (p *peerConnection) setIdle(elapsed time.Duration, delivered int, throughput *float64, idle *int32) { 241 // Irrelevant of the scaling, make sure the peer ends up idle 242 defer atomic.StoreInt32(idle, 0) 243 244 p.lock.Lock() 245 defer p.lock.Unlock() 246 247 // If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum 248 if delivered == 0 { 249 *throughput = 0 250 return 251 } 252 // Otherwise update the throughput with a new measurement 253 if elapsed <= 0 { 254 elapsed = 1 // +1 (ns) to ensure non-zero divisor 255 } 256 measured := float64(delivered) / (float64(elapsed) / float64(time.Second)) 257 258 *throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured 259 p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed)) 260 261 p.log.Trace("Peer throughput measurements updated", 262 "hps", p.headerThroughput, "bps", p.blockThroughput, 263 "rps", p.receiptThroughput, "sps", p.stateThroughput, 264 "miss", len(p.lacking), "rtt", p.rtt) 265 } 266 267 // HeaderCapacity retrieves the peers header download allowance based on its 268 // previously discovered throughput. 
269 func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int { 270 p.lock.RLock() 271 defer p.lock.RUnlock() 272 273 return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch))) 274 } 275 276 // BlockCapacity retrieves the peers block download allowance based on its 277 // previously discovered throughput. 278 func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int { 279 p.lock.RLock() 280 defer p.lock.RUnlock() 281 282 return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch))) 283 } 284 285 // ReceiptCapacity retrieves the peers receipt download allowance based on its 286 // previously discovered throughput. 287 func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int { 288 p.lock.RLock() 289 defer p.lock.RUnlock() 290 291 return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch))) 292 } 293 294 // NodeDataCapacity retrieves the peers state download allowance based on its 295 // previously discovered throughput. 296 func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int { 297 p.lock.RLock() 298 defer p.lock.RUnlock() 299 300 return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch))) 301 } 302 303 // MarkLacking appends a new entity to the set of items (blocks, receipts, states) 304 // that a peer is known not to have (i.e. have been requested before). If the 305 // set reaches its maximum allowed capacity, items are randomly dropped off. 
306 func (p *peerConnection) MarkLacking(hash common.Hash) { 307 p.lock.Lock() 308 defer p.lock.Unlock() 309 310 for len(p.lacking) >= maxLackingHashes { 311 for drop := range p.lacking { 312 delete(p.lacking, drop) 313 break 314 } 315 } 316 p.lacking[hash] = struct{}{} 317 } 318 319 // Lacks retrieves whether the hash of a blockchain item is on the peers lacking 320 // list (i.e. whether we know that the peer does not have it). 321 func (p *peerConnection) Lacks(hash common.Hash) bool { 322 p.lock.RLock() 323 defer p.lock.RUnlock() 324 325 _, ok := p.lacking[hash] 326 return ok 327 } 328 329 // peerSet represents the collection of active peer participating in the chain 330 // download procedure. 331 type peerSet struct { 332 peers map[string]*peerConnection 333 newPeerFeed event.Feed 334 peerDropFeed event.Feed 335 lock sync.RWMutex 336 } 337 338 // newPeerSet creates a new peer set top track the active download sources. 339 func newPeerSet() *peerSet { 340 return &peerSet{ 341 peers: make(map[string]*peerConnection), 342 } 343 } 344 345 // SubscribeNewPeers subscribes to peer arrival events. 346 func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription { 347 return ps.newPeerFeed.Subscribe(ch) 348 } 349 350 // SubscribePeerDrops subscribes to peer departure events. 351 func (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription { 352 return ps.peerDropFeed.Subscribe(ch) 353 } 354 355 // Reset iterates over the current peer set, and resets each of the known peers 356 // to prepare for a next batch of block retrieval. 357 func (ps *peerSet) Reset() { 358 ps.lock.RLock() 359 defer ps.lock.RUnlock() 360 361 for _, peer := range ps.peers { 362 peer.Reset() 363 } 364 } 365 366 // Register injects a new peer into the working set, or returns an error if the 367 // peer is already known. 
368 // 369 // The method also sets the starting throughput values of the new peer to the 370 // average of all existing peers, to give it a realistic chance of being used 371 // for data retrievals. 372 func (ps *peerSet) Register(p *peerConnection) error { 373 // Retrieve the current median RTT as a sane default 374 p.rtt = ps.medianRTT() 375 376 // Register the new peer with some meaningful defaults 377 ps.lock.Lock() 378 if _, ok := ps.peers[p.id]; ok { 379 ps.lock.Unlock() 380 return errAlreadyRegistered 381 } 382 if len(ps.peers) > 0 { 383 p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0 384 385 for _, peer := range ps.peers { 386 peer.lock.RLock() 387 p.headerThroughput += peer.headerThroughput 388 p.blockThroughput += peer.blockThroughput 389 p.receiptThroughput += peer.receiptThroughput 390 p.stateThroughput += peer.stateThroughput 391 peer.lock.RUnlock() 392 } 393 p.headerThroughput /= float64(len(ps.peers)) 394 p.blockThroughput /= float64(len(ps.peers)) 395 p.receiptThroughput /= float64(len(ps.peers)) 396 p.stateThroughput /= float64(len(ps.peers)) 397 } 398 ps.peers[p.id] = p 399 ps.lock.Unlock() 400 401 ps.newPeerFeed.Send(p) 402 return nil 403 } 404 405 // Unregister removes a remote peer from the active set, disabling any further 406 // actions to/from that particular entity. 407 func (ps *peerSet) Unregister(id string) error { 408 ps.lock.Lock() 409 p, ok := ps.peers[id] 410 if !ok { 411 ps.lock.Unlock() 412 return errNotRegistered 413 } 414 delete(ps.peers, id) 415 ps.lock.Unlock() 416 417 ps.peerDropFeed.Send(p) 418 return nil 419 } 420 421 // Peer retrieves the registered peer with the given id. 422 func (ps *peerSet) Peer(id string) *peerConnection { 423 ps.lock.RLock() 424 defer ps.lock.RUnlock() 425 426 return ps.peers[id] 427 } 428 429 // Len returns if the current number of peers in the set. 
430 func (ps *peerSet) Len() int { 431 ps.lock.RLock() 432 defer ps.lock.RUnlock() 433 434 return len(ps.peers) 435 } 436 437 // AllPeers retrieves a flat list of all the peers within the set. 438 func (ps *peerSet) AllPeers() []*peerConnection { 439 ps.lock.RLock() 440 defer ps.lock.RUnlock() 441 442 list := make([]*peerConnection, 0, len(ps.peers)) 443 for _, p := range ps.peers { 444 list = append(list, p) 445 } 446 return list 447 } 448 449 // HeaderIdlePeers retrieves a flat list of all the currently header-idle peers 450 // within the active peer set, ordered by their reputation. 451 func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) { 452 idle := func(p *peerConnection) bool { 453 return atomic.LoadInt32(&p.headerIdle) == 0 454 } 455 throughput := func(p *peerConnection) float64 { 456 p.lock.RLock() 457 defer p.lock.RUnlock() 458 return p.headerThroughput 459 } 460 return ps.idlePeers(63, 65, idle, throughput) 461 } 462 463 // BodyIdlePeers retrieves a flat list of all the currently body-idle peers within 464 // the active peer set, ordered by their reputation. 465 func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) { 466 idle := func(p *peerConnection) bool { 467 return atomic.LoadInt32(&p.blockIdle) == 0 468 } 469 throughput := func(p *peerConnection) float64 { 470 p.lock.RLock() 471 defer p.lock.RUnlock() 472 return p.blockThroughput 473 } 474 return ps.idlePeers(63, 65, idle, throughput) 475 } 476 477 // ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers 478 // within the active peer set, ordered by their reputation. 
479 func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) { 480 idle := func(p *peerConnection) bool { 481 return atomic.LoadInt32(&p.receiptIdle) == 0 482 } 483 throughput := func(p *peerConnection) float64 { 484 p.lock.RLock() 485 defer p.lock.RUnlock() 486 return p.receiptThroughput 487 } 488 return ps.idlePeers(63, 65, idle, throughput) 489 } 490 491 // NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle 492 // peers within the active peer set, ordered by their reputation. 493 func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) { 494 idle := func(p *peerConnection) bool { 495 return atomic.LoadInt32(&p.stateIdle) == 0 496 } 497 throughput := func(p *peerConnection) float64 { 498 p.lock.RLock() 499 defer p.lock.RUnlock() 500 return p.stateThroughput 501 } 502 return ps.idlePeers(63, 65, idle, throughput) 503 } 504 505 // idlePeers retrieves a flat list of all currently idle peers satisfying the 506 // protocol version constraints, using the provided function to check idleness. 507 // The resulting set of peers are sorted by their measure throughput. 508 func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerConnection) bool, throughput func(*peerConnection) float64) ([]*peerConnection, int) { 509 ps.lock.RLock() 510 defer ps.lock.RUnlock() 511 512 idle, total := make([]*peerConnection, 0, len(ps.peers)), 0 513 tps := make([]float64, 0, len(ps.peers)) 514 for _, p := range ps.peers { 515 if p.version >= minProtocol && p.version <= maxProtocol { 516 if idleCheck(p) { 517 idle = append(idle, p) 518 tps = append(tps, throughput(p)) 519 } 520 total++ 521 } 522 } 523 // And sort them 524 sortPeers := &peerThroughputSort{idle, tps} 525 sort.Sort(sortPeers) 526 return sortPeers.p, total 527 } 528 529 // medianRTT returns the median RTT of the peerset, considering only the tuning 530 // peers if there are more peers available. 
531 func (ps *peerSet) medianRTT() time.Duration { 532 // Gather all the currently measured round trip times 533 ps.lock.RLock() 534 defer ps.lock.RUnlock() 535 536 rtts := make([]float64, 0, len(ps.peers)) 537 for _, p := range ps.peers { 538 p.lock.RLock() 539 rtts = append(rtts, float64(p.rtt)) 540 p.lock.RUnlock() 541 } 542 sort.Float64s(rtts) 543 544 median := rttMaxEstimate 545 if qosTuningPeers <= len(rtts) { 546 median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers 547 } else if len(rtts) > 0 { 548 median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (maintain even like this some baseline qos) 549 } 550 // Restrict the RTT into some QoS defaults, irrelevant of true RTT 551 if median < rttMinEstimate { 552 median = rttMinEstimate 553 } 554 if median > rttMaxEstimate { 555 median = rttMaxEstimate 556 } 557 return median 558 } 559 560 // peerThroughputSort implements the Sort interface, and allows for 561 // sorting a set of peers by their throughput 562 // The sorted data is with the _highest_ throughput first 563 type peerThroughputSort struct { 564 p []*peerConnection 565 tp []float64 566 } 567 568 func (ps *peerThroughputSort) Len() int { 569 return len(ps.p) 570 } 571 572 func (ps *peerThroughputSort) Less(i, j int) bool { 573 return ps.tp[i] > ps.tp[j] 574 } 575 576 func (ps *peerThroughputSort) Swap(i, j int) { 577 ps.p[i], ps.p[j] = ps.p[j], ps.p[i] 578 ps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i] 579 }