// Copyright 2018 The Energi Core Authors
// Copyright 2015 The go-ethereum Authors
// This file is part of the Energi Core library.
//
// The Energi Core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Energi Core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Energi Core library. If not, see <http://www.gnu.org/licenses/>.

// Contains the active peer-set of the downloader, maintaining both failures
// as well as reputation metrics to prioritize the block retrievals.

package downloader

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
)

const (
	maxLackingHashes  = 4096 // Maximum number of entries allowed on the list of lacking items
	measurementImpact = 0.1  // The impact a single measurement has on a peer's final throughput value.
)

var (
	errAlreadyFetching   = errors.New("already fetching blocks from peer")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)

// peerConnection represents an active peer from which hashes and blocks are retrieved.
type peerConnection struct {
	id string // Unique identifier of the peer

	headerIdle  int32 // Current header activity state of the peer (idle = 0, active = 1)
	blockIdle   int32 // Current block activity state of the peer (idle = 0, active = 1)
	receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
	stateIdle   int32 // Current node data activity state of the peer (idle = 0, active = 1)

	headerThroughput  float64 // Number of headers measured to be retrievable per second
	blockThroughput   float64 // Number of blocks (bodies) measured to be retrievable per second
	receiptThroughput float64 // Number of receipts measured to be retrievable per second
	stateThroughput   float64 // Number of node data pieces measured to be retrievable per second

	rtt time.Duration // Request round trip time to track responsiveness (QoS)

	headerStarted  time.Time // Time instance when the last header fetch was started
	blockStarted   time.Time // Time instance when the last block (body) fetch was started
	receiptStarted time.Time // Time instance when the last receipt fetch was started
	stateStarted   time.Time // Time instance when the last node data fetch was started

	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)

	peer Peer // Network peer used to issue the actual retrieval requests

	version int        // Eth protocol version number to switch strategies
	log     log.Logger // Contextual logger to add extra infos to peer logs
	lock    sync.RWMutex
}

// LightPeer encapsulates the methods required to synchronise with a remote light peer.
type LightPeer interface {
	// Head returns the hash and total difficulty of the peer's best known block.
	Head() (common.Hash, *big.Int)
	// RequestHeadersByHash fetches a batch of headers starting at the given hash.
	RequestHeadersByHash(common.Hash, int, int, bool) error
	// RequestHeadersByNumber fetches a batch of headers starting at the given number.
	RequestHeadersByNumber(uint64, int, int, bool) error
}

// Peer encapsulates the methods required to synchronise with a remote full peer.
87 type Peer interface { 88 LightPeer 89 RequestBodies([]common.Hash) error 90 RequestReceipts([]common.Hash) error 91 RequestNodeData([]common.Hash) error 92 } 93 94 // lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods. 95 type lightPeerWrapper struct { 96 peer LightPeer 97 } 98 99 func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() } 100 func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error { 101 return w.peer.RequestHeadersByHash(h, amount, skip, reverse) 102 } 103 func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error { 104 return w.peer.RequestHeadersByNumber(i, amount, skip, reverse) 105 } 106 func (w *lightPeerWrapper) RequestBodies([]common.Hash) error { 107 panic("RequestBodies not supported in light client mode sync") 108 } 109 func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error { 110 panic("RequestReceipts not supported in light client mode sync") 111 } 112 func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error { 113 panic("RequestNodeData not supported in light client mode sync") 114 } 115 116 // newPeerConnection creates a new downloader peer. 117 func newPeerConnection(id string, version int, peer Peer, logger log.Logger) *peerConnection { 118 return &peerConnection{ 119 id: id, 120 lacking: make(map[common.Hash]struct{}), 121 122 peer: peer, 123 124 version: version, 125 log: logger, 126 } 127 } 128 129 // Reset clears the internal state of a peer entity. 
func (p *peerConnection) Reset() {
	p.lock.Lock()
	defer p.lock.Unlock()

	// Mark every retrieval channel idle again
	atomic.StoreInt32(&p.headerIdle, 0)
	atomic.StoreInt32(&p.blockIdle, 0)
	atomic.StoreInt32(&p.receiptIdle, 0)
	atomic.StoreInt32(&p.stateIdle, 0)

	// Drop all accumulated throughput measurements
	p.headerThroughput = 0
	p.blockThroughput = 0
	p.receiptThroughput = 0
	p.stateThroughput = 0

	// Forget which items the peer was known to lack
	p.lacking = make(map[common.Hash]struct{})
}

// FetchHeaders sends a header retrieval request to the remote peer.
// It returns errAlreadyFetching if a header request is already in flight.
func (p *peerConnection) FetchHeaders(from uint64, count int) error {
	// Sanity check the protocol version
	if p.version < 62 {
		panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version))
	}
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.headerStarted = time.Now()

	// Issue the header retrieval request (absolute upwards without gaps);
	// fired asynchronously, the reply arrives via the delivery channels
	go p.peer.RequestHeadersByNumber(from, count, 0, false)

	return nil
}

// FetchBodies sends a block body retrieval request to the remote peer.
// It returns errAlreadyFetching if a body request is already in flight.
func (p *peerConnection) FetchBodies(request *fetchRequest) error {
	// Sanity check the protocol version
	if p.version < 62 {
		panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version))
	}
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.blockStarted = time.Now()

	// Convert the header set to a retrievable slice
	hashes := make([]common.Hash, 0, len(request.Headers))
	for _, header := range request.Headers {
		hashes = append(hashes, header.Hash())
	}
	go p.peer.RequestBodies(hashes)

	return nil
}

// FetchReceipts sends a receipt retrieval request to the remote peer.
188 func (p *peerConnection) FetchReceipts(request *fetchRequest) error { 189 // Sanity check the protocol version 190 if p.version < nrg70 { 191 panic(fmt.Sprintf("body fetch [nrg/70+] requested on nrg/%d", p.version)) 192 } 193 // Short circuit if the peer is already fetching 194 if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { 195 return errAlreadyFetching 196 } 197 p.receiptStarted = time.Now() 198 199 // Convert the header set to a retrievable slice 200 hashes := make([]common.Hash, 0, len(request.Headers)) 201 for _, header := range request.Headers { 202 hashes = append(hashes, header.Hash()) 203 } 204 go p.peer.RequestReceipts(hashes) 205 206 return nil 207 } 208 209 // FetchNodeData sends a node state data retrieval request to the remote peer. 210 func (p *peerConnection) FetchNodeData(hashes []common.Hash) error { 211 // Sanity check the protocol version 212 if p.version < nrg70 { 213 panic(fmt.Sprintf("node data fetch [nrg/70+] requested on nrg/%d", p.version)) 214 } 215 // Short circuit if the peer is already fetching 216 if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { 217 return errAlreadyFetching 218 } 219 p.stateStarted = time.Now() 220 221 go p.peer.RequestNodeData(hashes) 222 223 return nil 224 } 225 226 // SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval 227 // requests. Its estimated header retrieval throughput is updated with that measured 228 // just now. 229 func (p *peerConnection) SetHeadersIdle(delivered int) { 230 p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle) 231 } 232 233 // SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval 234 // requests. Its estimated body retrieval throughput is updated with that measured 235 // just now. 
func (p *peerConnection) SetBodiesIdle(delivered int) {
	p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle)
}

// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
// retrieval requests. Its estimated receipt retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetReceiptsIdle(delivered int) {
	p.setIdle(p.receiptStarted, delivered, &p.receiptThroughput, &p.receiptIdle)
}

// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
// data retrieval requests. Its estimated state retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetNodeDataIdle(delivered int) {
	p.setIdle(p.stateStarted, delivered, &p.stateThroughput, &p.stateIdle)
}

// setIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its estimated retrieval throughput is updated with that measured just now.
// setIdle clears the given idle flag and folds the latest delivery measurement
// into the peer's throughput estimate as an exponential moving average
// (weight measurementImpact). A zero delivery (timeout / missing data)
// collapses the throughput to zero so the peer is deprioritized.
func (p *peerConnection) setIdle(started time.Time, delivered int, throughput *float64, idle *int32) {
	// Irrelevant of the scaling, make sure the peer ends up idle.
	// NOTE: defers run LIFO, so the lock below is released BEFORE the idle
	// flag is cleared — the updated throughput is visible once the peer
	// becomes eligible for new requests again.
	defer atomic.StoreInt32(idle, 0)

	p.lock.Lock()
	defer p.lock.Unlock()

	// If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum
	if delivered == 0 {
		*throughput = 0
		return
	}
	// Otherwise update the throughput with a new measurement
	elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor
	measured := float64(delivered) / (float64(elapsed) / float64(time.Second))

	// Blend the new measurement into the moving averages for throughput and RTT
	*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured
	p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed))

	p.log.Trace("Peer throughput measurements updated",
		"hps", p.headerThroughput, "bps", p.blockThroughput,
		"rps", p.receiptThroughput, "sps", p.stateThroughput,
		"miss", len(p.lacking), "rtt", p.rtt)
}

// HeaderCapacity retrieves the peers header download allowance based on its
// previously discovered throughput.
func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	// Allowance = throughput * targetRTT, floored at 1+1 and capped at MaxHeaderFetch
	return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch)))
}

// BlockCapacity retrieves the peers block download allowance based on its
// previously discovered throughput.
func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	// Allowance = throughput * targetRTT, floored at 1+1 and capped at MaxBlockFetch
	return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch)))
}

// ReceiptCapacity retrieves the peers receipt download allowance based on its
// previously discovered throughput.
301 func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int { 302 p.lock.RLock() 303 defer p.lock.RUnlock() 304 305 return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch))) 306 } 307 308 // NodeDataCapacity retrieves the peers state download allowance based on its 309 // previously discovered throughput. 310 func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int { 311 p.lock.RLock() 312 defer p.lock.RUnlock() 313 314 return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch))) 315 } 316 317 // MarkLacking appends a new entity to the set of items (blocks, receipts, states) 318 // that a peer is known not to have (i.e. have been requested before). If the 319 // set reaches its maximum allowed capacity, items are randomly dropped off. 320 func (p *peerConnection) MarkLacking(hash common.Hash) { 321 p.lock.Lock() 322 defer p.lock.Unlock() 323 324 for len(p.lacking) >= maxLackingHashes { 325 for drop := range p.lacking { 326 delete(p.lacking, drop) 327 break 328 } 329 } 330 p.lacking[hash] = struct{}{} 331 } 332 333 // Lacks retrieves whether the hash of a blockchain item is on the peers lacking 334 // list (i.e. whether we know that the peer does not have it). 335 func (p *peerConnection) Lacks(hash common.Hash) bool { 336 p.lock.RLock() 337 defer p.lock.RUnlock() 338 339 _, ok := p.lacking[hash] 340 return ok 341 } 342 343 // peerSet represents the collection of active peer participating in the chain 344 // download procedure. 345 type peerSet struct { 346 peers map[string]*peerConnection 347 newPeerFeed event.Feed 348 peerDropFeed event.Feed 349 lock sync.RWMutex 350 } 351 352 // newPeerSet creates a new peer set top track the active download sources. 
353 func newPeerSet() *peerSet { 354 return &peerSet{ 355 peers: make(map[string]*peerConnection), 356 } 357 } 358 359 // SubscribeNewPeers subscribes to peer arrival events. 360 func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription { 361 return ps.newPeerFeed.Subscribe(ch) 362 } 363 364 // SubscribePeerDrops subscribes to peer departure events. 365 func (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription { 366 return ps.peerDropFeed.Subscribe(ch) 367 } 368 369 // Reset iterates over the current peer set, and resets each of the known peers 370 // to prepare for a next batch of block retrieval. 371 func (ps *peerSet) Reset() { 372 ps.lock.RLock() 373 defer ps.lock.RUnlock() 374 375 for _, peer := range ps.peers { 376 peer.Reset() 377 } 378 } 379 380 // Register injects a new peer into the working set, or returns an error if the 381 // peer is already known. 382 // 383 // The method also sets the starting throughput values of the new peer to the 384 // average of all existing peers, to give it a realistic chance of being used 385 // for data retrievals. 
386 func (ps *peerSet) Register(p *peerConnection) error { 387 // Retrieve the current median RTT as a sane default 388 p.rtt = ps.medianRTT() 389 390 // Register the new peer with some meaningful defaults 391 ps.lock.Lock() 392 if _, ok := ps.peers[p.id]; ok { 393 ps.lock.Unlock() 394 return errAlreadyRegistered 395 } 396 if len(ps.peers) > 0 { 397 p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0 398 399 for _, peer := range ps.peers { 400 peer.lock.RLock() 401 p.headerThroughput += peer.headerThroughput 402 p.blockThroughput += peer.blockThroughput 403 p.receiptThroughput += peer.receiptThroughput 404 p.stateThroughput += peer.stateThroughput 405 peer.lock.RUnlock() 406 } 407 p.headerThroughput /= float64(len(ps.peers)) 408 p.blockThroughput /= float64(len(ps.peers)) 409 p.receiptThroughput /= float64(len(ps.peers)) 410 p.stateThroughput /= float64(len(ps.peers)) 411 } 412 ps.peers[p.id] = p 413 ps.lock.Unlock() 414 415 ps.newPeerFeed.Send(p) 416 return nil 417 } 418 419 // Unregister removes a remote peer from the active set, disabling any further 420 // actions to/from that particular entity. 421 func (ps *peerSet) Unregister(id string) error { 422 ps.lock.Lock() 423 p, ok := ps.peers[id] 424 if !ok { 425 defer ps.lock.Unlock() 426 return errNotRegistered 427 } 428 delete(ps.peers, id) 429 ps.lock.Unlock() 430 431 ps.peerDropFeed.Send(p) 432 return nil 433 } 434 435 // Peer retrieves the registered peer with the given id. 436 func (ps *peerSet) Peer(id string) *peerConnection { 437 ps.lock.RLock() 438 defer ps.lock.RUnlock() 439 440 return ps.peers[id] 441 } 442 443 // Len returns if the current number of peers in the set. 444 func (ps *peerSet) Len() int { 445 ps.lock.RLock() 446 defer ps.lock.RUnlock() 447 448 return len(ps.peers) 449 } 450 451 // AllPeers retrieves a flat list of all the peers within the set. 
452 func (ps *peerSet) AllPeers() []*peerConnection { 453 ps.lock.RLock() 454 defer ps.lock.RUnlock() 455 456 list := make([]*peerConnection, 0, len(ps.peers)) 457 for _, p := range ps.peers { 458 list = append(list, p) 459 } 460 return list 461 } 462 463 // HeaderIdlePeers retrieves a flat list of all the currently header-idle peers 464 // within the active peer set, ordered by their reputation. 465 func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) { 466 idle := func(p *peerConnection) bool { 467 return atomic.LoadInt32(&p.headerIdle) == 0 468 } 469 throughput := func(p *peerConnection) float64 { 470 p.lock.RLock() 471 defer p.lock.RUnlock() 472 return p.headerThroughput 473 } 474 return ps.idlePeers(62, 70, idle, throughput) 475 } 476 477 // BodyIdlePeers retrieves a flat list of all the currently body-idle peers within 478 // the active peer set, ordered by their reputation. 479 func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) { 480 idle := func(p *peerConnection) bool { 481 return atomic.LoadInt32(&p.blockIdle) == 0 482 } 483 throughput := func(p *peerConnection) float64 { 484 p.lock.RLock() 485 defer p.lock.RUnlock() 486 return p.blockThroughput 487 } 488 return ps.idlePeers(62, 70, idle, throughput) 489 } 490 491 // ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers 492 // within the active peer set, ordered by their reputation. 493 func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) { 494 idle := func(p *peerConnection) bool { 495 return atomic.LoadInt32(&p.receiptIdle) == 0 496 } 497 throughput := func(p *peerConnection) float64 { 498 p.lock.RLock() 499 defer p.lock.RUnlock() 500 return p.receiptThroughput 501 } 502 return ps.idlePeers(nrg70, nrg70, idle, throughput) 503 } 504 505 // NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle 506 // peers within the active peer set, ordered by their reputation. 
507 func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) { 508 idle := func(p *peerConnection) bool { 509 return atomic.LoadInt32(&p.stateIdle) == 0 510 } 511 throughput := func(p *peerConnection) float64 { 512 p.lock.RLock() 513 defer p.lock.RUnlock() 514 return p.stateThroughput 515 } 516 return ps.idlePeers(nrg70, nrg70, idle, throughput) 517 } 518 519 // idlePeers retrieves a flat list of all currently idle peers satisfying the 520 // protocol version constraints, using the provided function to check idleness. 521 // The resulting set of peers are sorted by their measure throughput. 522 func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerConnection) bool, throughput func(*peerConnection) float64) ([]*peerConnection, int) { 523 ps.lock.RLock() 524 defer ps.lock.RUnlock() 525 526 idle, total := make([]*peerConnection, 0, len(ps.peers)), 0 527 for _, p := range ps.peers { 528 if p.version >= minProtocol && p.version <= maxProtocol { 529 if idleCheck(p) { 530 idle = append(idle, p) 531 } 532 total++ 533 } 534 } 535 for i := 0; i < len(idle); i++ { 536 for j := i + 1; j < len(idle); j++ { 537 if throughput(idle[i]) < throughput(idle[j]) { 538 idle[i], idle[j] = idle[j], idle[i] 539 } 540 } 541 } 542 return idle, total 543 } 544 545 // medianRTT returns the median RTT of the peerset, considering only the tuning 546 // peers if there are more peers available. 
547 func (ps *peerSet) medianRTT() time.Duration { 548 // Gather all the currently measured round trip times 549 ps.lock.RLock() 550 defer ps.lock.RUnlock() 551 552 rtts := make([]float64, 0, len(ps.peers)) 553 for _, p := range ps.peers { 554 p.lock.RLock() 555 rtts = append(rtts, float64(p.rtt)) 556 p.lock.RUnlock() 557 } 558 sort.Float64s(rtts) 559 560 median := rttMaxEstimate 561 if qosTuningPeers <= len(rtts) { 562 median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers 563 } else if len(rtts) > 0 { 564 median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (maintain even like this some baseline qos) 565 } 566 // Restrict the RTT into some QoS defaults, irrelevant of true RTT 567 if median < rttMinEstimate { 568 median = rttMinEstimate 569 } 570 if median > rttMaxEstimate { 571 median = rttMaxEstimate 572 } 573 return median 574 }